  1. #!/usr/bin/env python3
  2. """
  3. 批量合并 OCR 结果
  4. 自动读取配置文件,对所有 VL 处理器的输出进行 bbox 合并
  5. 支持执行器输出日志重定向
  6. """
  7. import os
  8. import sys
  9. import yaml
  10. import argparse
  11. import subprocess
  12. from pathlib import Path
  13. from datetime import datetime
  14. from typing import Dict, List, Tuple, Optional, Any
  15. from dataclasses import dataclass
  16. import logging
  17. from tqdm import tqdm
  18. # 添加 merger 模块路径
  19. sys.path.insert(0, str(Path(__file__).parent.parent / 'merger'))
  20. @dataclass
  21. class MergeTask:
  22. """合并任务"""
  23. processor_name: str
  24. vl_result_dir: Path
  25. paddle_result_dir: Path
  26. output_dir: Path
  27. merger_script: str
  28. description: str
  29. log_file: str = "" # 🎯 新增:日志文件路径
  30. class BatchMerger:
  31. """批量合并器"""
  32. # VL 处理器类型映射到合并脚本
  33. MERGER_SCRIPTS = {
  34. 'paddleocr_vl': 'merge_paddleocr_vl_paddleocr.py',
  35. 'mineru': 'merge_mineru_paddle_ocr.py',
  36. 'dotsocr': 'merge_mineru_paddle_ocr.py', # DotsOCR 也用 MinerU 格式
  37. }
  38. def __init__(self, config_file: str, base_dir: str = None):
  39. """
  40. Args:
  41. config_file: processor_configs.yaml 路径
  42. base_dir: PDF 基础目录,覆盖配置文件中的设置
  43. """
  44. self.config_file = Path(config_file)
  45. self.config = self._load_config()
  46. self.base_dir = Path(base_dir) if base_dir else Path(self.config['global']['base_dir'])
  47. # 🎯 日志基础目录
  48. self.log_base_dir = self.base_dir / self.config['global'].get('log_dir', 'logs')
  49. # 设置日志
  50. self.logger = self._setup_logger()
  51. # merger 脚本目录
  52. self.merger_dir = Path(__file__).parent.parent / 'merger'
  53. # 🎯 统计信息
  54. self.merge_results: List[Dict[str, Any]] = []
  55. def _load_config(self) -> Dict:
  56. """加载配置文件"""
  57. with open(self.config_file, 'r', encoding='utf-8') as f:
  58. return yaml.safe_load(f)
  59. def _setup_logger(self) -> logging.Logger:
  60. """设置日志"""
  61. logger = logging.getLogger('BatchMerger')
  62. logger.setLevel(logging.INFO)
  63. if not logger.handlers:
  64. console_handler = logging.StreamHandler()
  65. console_handler.setLevel(logging.INFO)
  66. formatter = logging.Formatter(
  67. '%(asctime)s - %(levelname)s - %(message)s',
  68. datefmt='%Y-%m-%d %H:%M:%S'
  69. )
  70. console_handler.setFormatter(formatter)
  71. logger.addHandler(console_handler)
  72. return logger
  73. def _detect_processor_type(self, processor_name: str) -> str:
  74. """
  75. 检测处理器类型
  76. Returns:
  77. 'paddleocr_vl', 'mineru', 'dotsocr', 'ppstructure' 等
  78. """
  79. name_lower = processor_name.lower()
  80. if 'paddleocr_vl' in name_lower or 'paddleocr-vl' in name_lower:
  81. return 'paddleocr_vl'
  82. elif 'mineru' in name_lower:
  83. return 'mineru'
  84. elif 'dotsocr' in name_lower or 'dots' in name_lower:
  85. return 'dotsocr'
  86. elif 'ppstructure' in name_lower or 'pp-structure' in name_lower:
  87. return 'ppstructure'
  88. else:
  89. return 'unknown'
  90. def _get_merger_script(self, processor_type: str) -> str:
  91. """获取合并脚本路径"""
  92. script_name = self.MERGER_SCRIPTS.get(processor_type)
  93. if not script_name:
  94. return None
  95. script_path = self.merger_dir / script_name
  96. return str(script_path) if script_path.exists() else None
  97. def _find_paddle_result_dir(self, pdf_dir: Path) -> Path:
  98. """
  99. 查找对应的 PaddleOCR 结果目录
  100. 优先级:
  101. 1. ppstructurev3_cpu_results (本地 CPU)
  102. 2. ppstructurev3_results (默认)
  103. 3. data_PPStructureV3_Results (旧格式)
  104. """
  105. candidates = [
  106. pdf_dir / 'ppstructurev3_client_results',
  107. pdf_dir / 'ppstructurev3_single_process_results',
  108. ]
  109. for candidate in candidates:
  110. if candidate.exists():
  111. return candidate
  112. return None
  113. def _get_log_file_path(self, pdf_dir: Path, processor_name: str) -> Path:
  114. """
  115. 🎯 获取合并任务的日志文件路径
  116. 日志结构:
  117. PDF目录/
  118. └── logs/
  119. └── merge_processor_name/
  120. └── PDF名称_merge_YYYYMMDD_HHMMSS.log
  121. """
  122. # 日志目录
  123. log_dir = pdf_dir / 'logs' / f'merge_{processor_name}'
  124. log_dir.mkdir(parents=True, exist_ok=True)
  125. # 日志文件名
  126. timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
  127. log_file = log_dir / f"{pdf_dir.name}_merge_{timestamp}.log"
  128. return log_file
  129. def discover_merge_tasks(
  130. self,
  131. pdf_list: List[str] = None,
  132. processors: List[str] = None
  133. ) -> List[MergeTask]:
  134. """
  135. 自动发现需要合并的任务
  136. Args:
  137. pdf_list: PDF 文件列表(不含扩展名),如 ['德_内蒙古银行照', ...]
  138. processors: 处理器列表,如 ['paddleocr_vl_single_process', ...]
  139. Returns:
  140. MergeTask 列表
  141. """
  142. tasks = []
  143. # 如果没有指定处理器,扫描所有 VL 类型的处理器
  144. if not processors:
  145. processors = []
  146. for proc_name, proc_config in self.config['processors'].items():
  147. proc_type = self._detect_processor_type(proc_name)
  148. if proc_type in ['paddleocr_vl', 'mineru', 'dotsocr']:
  149. processors.append(proc_name)
  150. # 如果没有指定 PDF 列表,扫描基础目录
  151. if not pdf_list:
  152. pdf_list = [d.name for d in self.base_dir.iterdir() if d.is_dir()]
  153. self.logger.info(f"📂 基础目录: {self.base_dir}")
  154. self.logger.info(f"🔍 发现 {len(pdf_list)} 个 PDF 目录")
  155. self.logger.info(f"⚙️ 发现 {len(processors)} 个 VL 处理器")
  156. # 遍历每个 PDF 目录和处理器组合
  157. for pdf_name in pdf_list:
  158. pdf_dir = self.base_dir / pdf_name
  159. if not pdf_dir.exists():
  160. self.logger.warning(f"⚠️ 目录不存在: {pdf_dir}")
  161. continue
  162. # 查找 PaddleOCR 结果目录
  163. paddle_result_dir = self._find_paddle_result_dir(pdf_dir)
  164. if not paddle_result_dir:
  165. self.logger.warning(f"⚠️ 未找到 PaddleOCR 结果: {pdf_name}")
  166. continue
  167. # 遍历每个 VL 处理器
  168. for proc_name in processors:
  169. if proc_name not in self.config['processors']:
  170. self.logger.warning(f"⚠️ 处理器不存在: {proc_name}")
  171. continue
  172. proc_config = self.config['processors'][proc_name]
  173. proc_type = self._detect_processor_type(proc_name)
  174. # 获取合并脚本
  175. merger_script = self._get_merger_script(proc_type)
  176. if not merger_script:
  177. self.logger.warning(f"⚠️ 不支持的处理器类型: {proc_name} ({proc_type})")
  178. continue
  179. # VL 结果目录
  180. vl_output_subdir = proc_config.get('output_subdir', f'{proc_name}_results')
  181. vl_result_dir = pdf_dir / vl_output_subdir
  182. if not vl_result_dir.exists():
  183. self.logger.debug(f"⏭️ VL 结果不存在: {vl_result_dir}")
  184. continue
  185. # 输出目录
  186. output_dir = pdf_dir / f"{vl_output_subdir}_cell_bbox"
  187. # 🎯 日志文件路径
  188. log_file = self._get_log_file_path(pdf_dir, proc_name)
  189. # 创建任务
  190. task = MergeTask(
  191. processor_name=proc_name,
  192. vl_result_dir=vl_result_dir,
  193. paddle_result_dir=paddle_result_dir,
  194. output_dir=output_dir,
  195. merger_script=merger_script,
  196. description=proc_config.get('description', proc_name),
  197. log_file=str(log_file) # 🎯 新增
  198. )
  199. tasks.append(task)
  200. return tasks
  201. def execute_merge_task(
  202. self,
  203. task: MergeTask,
  204. window: int = 15,
  205. threshold: int = 85,
  206. output_type: str = 'both',
  207. dry_run: bool = False
  208. ) -> Dict[str, Any]:
  209. """
  210. 🎯 执行单个合并任务(支持日志重定向)
  211. Args:
  212. task: 合并任务
  213. window: 查找窗口
  214. threshold: 相似度阈值
  215. output_type: 输出格式
  216. dry_run: 模拟运行
  217. Returns:
  218. 执行结果字典
  219. """
  220. self.logger.info(f"\n{'='*80}")
  221. self.logger.info(f"📄 处理: {task.vl_result_dir.parent.name}")
  222. self.logger.info(f"🔧 处理器: {task.description}")
  223. self.logger.info(f"📂 VL 结果: {task.vl_result_dir}")
  224. self.logger.info(f"📂 PaddleOCR 结果: {task.paddle_result_dir}")
  225. self.logger.info(f"📂 输出目录: {task.output_dir}")
  226. self.logger.info(f"📄 日志文件: {task.log_file}")
  227. self.logger.info(f"{'='*80}")
  228. # 构建命令
  229. cmd = [
  230. sys.executable, # 当前 Python 解释器
  231. task.merger_script,
  232. f"--{self._get_vl_arg_name(task.merger_script)}-dir", str(task.vl_result_dir),
  233. '--paddle-dir', str(task.paddle_result_dir),
  234. '--output-dir', str(task.output_dir),
  235. '--output-type', output_type,
  236. '--window', str(window),
  237. '--threshold', str(threshold)
  238. ]
  239. if dry_run:
  240. self.logger.info(f"[DRY RUN] 命令: {' '.join(cmd)}")
  241. return {
  242. 'task': task,
  243. 'success': True,
  244. 'duration': 0,
  245. 'error': '',
  246. 'dry_run': True
  247. }
  248. # 🎯 执行命令并重定向输出到日志文件
  249. import time
  250. start_time = time.time()
  251. try:
  252. with open(task.log_file, 'w', encoding='utf-8') as log_f:
  253. # 写入日志头
  254. log_f.write(f"{'='*80}\n")
  255. log_f.write(f"合并任务日志\n")
  256. log_f.write(f"{'='*80}\n\n")
  257. log_f.write(f"PDF 目录: {task.vl_result_dir.parent}\n")
  258. log_f.write(f"处理器: {task.description}\n")
  259. log_f.write(f"处理器名称: {task.processor_name}\n")
  260. log_f.write(f"VL 结果目录: {task.vl_result_dir}\n")
  261. log_f.write(f"PaddleOCR 结果目录: {task.paddle_result_dir}\n")
  262. log_f.write(f"输出目录: {task.output_dir}\n")
  263. log_f.write(f"合并脚本: {task.merger_script}\n")
  264. log_f.write(f"查找窗口: {window}\n")
  265. log_f.write(f"相似度阈值: {threshold}\n")
  266. log_f.write(f"输出格式: {output_type}\n")
  267. log_f.write(f"开始时间: {datetime.now()}\n")
  268. log_f.write(f"{'='*80}\n\n")
  269. log_f.flush()
  270. # 执行命令
  271. result = subprocess.run(
  272. cmd,
  273. stdout=log_f, # 🎯 重定向 stdout
  274. stderr=subprocess.STDOUT, # 🎯 合并 stderr 到 stdout
  275. text=True,
  276. check=True
  277. )
  278. # 写入日志尾
  279. log_f.write(f"\n{'='*80}\n")
  280. log_f.write(f"结束时间: {datetime.now()}\n")
  281. log_f.write(f"状态: 成功\n")
  282. log_f.write(f"{'='*80}\n")
  283. duration = time.time() - start_time
  284. self.logger.info(f"✅ 合并成功 (耗时: {duration:.2f}秒)")
  285. return {
  286. 'task': task,
  287. 'success': True,
  288. 'duration': duration,
  289. 'error': '',
  290. 'dry_run': False
  291. }
  292. except subprocess.CalledProcessError as e:
  293. duration = time.time() - start_time
  294. error_msg = f"命令执行失败 (退出码: {e.returncode})"
  295. # 🎯 在日志文件中追加错误信息
  296. with open(task.log_file, 'a', encoding='utf-8') as log_f:
  297. log_f.write(f"\n{'='*80}\n")
  298. log_f.write(f"结束时间: {datetime.now()}\n")
  299. log_f.write(f"状态: 失败\n")
  300. log_f.write(f"错误: {error_msg}\n")
  301. log_f.write(f"{'='*80}\n")
  302. self.logger.error(f"❌ 合并失败 (耗时: {duration:.2f}秒)")
  303. self.logger.error(f"错误信息: {error_msg}")
  304. self.logger.error(f"详细日志: {task.log_file}")
  305. return {
  306. 'task': task,
  307. 'success': False,
  308. 'duration': duration,
  309. 'error': error_msg,
  310. 'dry_run': False
  311. }
  312. except Exception as e:
  313. duration = time.time() - start_time
  314. error_msg = str(e)
  315. with open(task.log_file, 'a', encoding='utf-8') as log_f:
  316. log_f.write(f"\n{'='*80}\n")
  317. log_f.write(f"结束时间: {datetime.now()}\n")
  318. log_f.write(f"状态: 异常\n")
  319. log_f.write(f"错误: {error_msg}\n")
  320. log_f.write(f"{'='*80}\n")
  321. self.logger.error(f"❌ 合并异常 (耗时: {duration:.2f}秒)")
  322. self.logger.error(f"错误信息: {error_msg}")
  323. self.logger.error(f"详细日志: {task.log_file}")
  324. return {
  325. 'task': task,
  326. 'success': False,
  327. 'duration': duration,
  328. 'error': error_msg,
  329. 'dry_run': False
  330. }
  331. def _get_vl_arg_name(self, merger_script: str) -> str:
  332. """获取 VL 参数名称"""
  333. script_name = Path(merger_script).stem
  334. if 'paddleocr_vl' in script_name:
  335. return 'paddleocr-vl'
  336. elif 'mineru' in script_name:
  337. return 'mineru'
  338. else:
  339. return 'vl'
  340. def _save_summary_log(self, stats: Dict[str, Any]):
  341. """🎯 保存汇总日志"""
  342. timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
  343. summary_log_file = self.log_base_dir / f"merge_batch_summary_{timestamp}.log"
  344. # 确保目录存在
  345. summary_log_file.parent.mkdir(parents=True, exist_ok=True)
  346. with open(summary_log_file, 'w', encoding='utf-8') as f:
  347. f.write("OCR 结果批量合并汇总日志\n")
  348. f.write("=" * 80 + "\n\n")
  349. f.write(f"配置文件: {self.config_file}\n")
  350. f.write(f"基础目录: {self.base_dir}\n")
  351. f.write(f"日志目录: {self.log_base_dir}\n")
  352. f.write(f"开始时间: {datetime.now()}\n")
  353. f.write(f"总耗时: {stats['total_duration']:.2f} 秒\n\n")
  354. f.write("统计信息:\n")
  355. f.write(f" 总任务数: {stats['total']}\n")
  356. f.write(f" 成功: {stats['success']}\n")
  357. f.write(f" 失败: {stats['failed']}\n\n")
  358. if stats['failed_tasks']:
  359. f.write("失败的任务:\n")
  360. for item in stats['failed_tasks']:
  361. f.write(f" ✗ {item['pdf_dir']} / {item['processor']}\n")
  362. f.write(f" 错误: {item['error']}\n")
  363. f.write(f" 日志: {item['log']}\n\n")
  364. f.write("详细结果:\n")
  365. for result in self.merge_results:
  366. task = result['task']
  367. status = "✓" if result['success'] else "✗"
  368. f.write(f"{status} {task.vl_result_dir.parent.name} / {task.processor_name} ({result['duration']:.2f}s)\n")
  369. f.write(f" 日志: {task.log_file}\n")
  370. if result['error']:
  371. f.write(f" 错误: {result['error']}\n")
  372. self.logger.info(f"汇总日志已保存: {summary_log_file}")
  373. def batch_merge(
  374. self,
  375. pdf_list: List[str] = None,
  376. processors: List[str] = None,
  377. window: int = 15,
  378. threshold: int = 85,
  379. output_type: str = 'both',
  380. dry_run: bool = False
  381. ) -> Dict:
  382. """
  383. 批量合并
  384. Returns:
  385. 统计信息字典
  386. """
  387. # 发现任务
  388. tasks = self.discover_merge_tasks(pdf_list, processors)
  389. if not tasks:
  390. self.logger.warning("⚠️ 没有发现任何合并任务")
  391. return {
  392. 'total': 0,
  393. 'success': 0,
  394. 'failed': 0,
  395. 'total_duration': 0,
  396. 'failed_tasks': []
  397. }
  398. self.logger.info(f"\n🎯 发现 {len(tasks)} 个合并任务\n")
  399. # 显示任务列表
  400. for i, task in enumerate(tasks, 1):
  401. self.logger.info(f"{i}. {task.vl_result_dir.parent.name} / {task.processor_name}")
  402. # 确认执行
  403. if not dry_run:
  404. confirm = input(f"\n是否继续执行 {len(tasks)} 个合并任务? [Y/n]: ")
  405. if confirm.lower() not in ['', 'y', 'yes']:
  406. self.logger.info("❌ 已取消")
  407. return {
  408. 'total': 0,
  409. 'success': 0,
  410. 'failed': 0,
  411. 'total_duration': 0,
  412. 'failed_tasks': []
  413. }
  414. # 执行任务
  415. import time
  416. batch_start_time = time.time()
  417. success_count = 0
  418. failed_count = 0
  419. with tqdm(total=len(tasks), desc="合并进度", unit="task") as pbar:
  420. for task in tasks:
  421. result = self.execute_merge_task(
  422. task,
  423. window=window,
  424. threshold=threshold,
  425. output_type=output_type,
  426. dry_run=dry_run
  427. )
  428. self.merge_results.append(result)
  429. if result['success']:
  430. success_count += 1
  431. else:
  432. failed_count += 1
  433. pbar.update(1)
  434. pbar.set_postfix({
  435. 'success': success_count,
  436. 'failed': failed_count
  437. })
  438. total_duration = time.time() - batch_start_time
  439. # 统计失败任务
  440. failed_tasks = [
  441. {
  442. 'pdf_dir': r['task'].vl_result_dir.parent.name,
  443. 'processor': r['task'].processor_name,
  444. 'error': r['error'],
  445. 'log': r['task'].log_file
  446. }
  447. for r in self.merge_results if not r['success']
  448. ]
  449. # 统计信息
  450. stats = {
  451. 'total': len(tasks),
  452. 'success': success_count,
  453. 'failed': failed_count,
  454. 'total_duration': total_duration,
  455. 'failed_tasks': failed_tasks
  456. }
  457. # 🎯 保存汇总日志
  458. self._save_summary_log(stats)
  459. # 打印总结
  460. self.logger.info(f"\n{'='*80}")
  461. self.logger.info("📊 合并完成")
  462. self.logger.info(f" 总任务数: {stats['total']}")
  463. self.logger.info(f" ✅ 成功: {stats['success']}")
  464. self.logger.info(f" ❌ 失败: {stats['failed']}")
  465. self.logger.info(f" ⏱️ 总耗时: {stats['total_duration']:.2f} 秒")
  466. self.logger.info(f"{'='*80}")
  467. if failed_tasks:
  468. self.logger.info(f"\n失败的任务:")
  469. for item in failed_tasks:
  470. self.logger.info(f" ✗ {item['pdf_dir']} / {item['processor']}")
  471. self.logger.info(f" 错误: {item['error']}")
  472. self.logger.info(f" 日志: {item['log']}")
  473. return stats
  474. def create_parser() -> argparse.ArgumentParser:
  475. """创建命令行参数解析器"""
  476. parser = argparse.ArgumentParser(
  477. description='批量合并 OCR 结果(VL + PaddleOCR)',
  478. formatter_class=argparse.RawDescriptionHelpFormatter,
  479. epilog="""
  480. 示例用法:
  481. 1. 合并配置文件中所有 VL 处理器的结果:
  482. python batch_merge_results.py
  483. 2. 合并指定 PDF 的结果:
  484. python batch_merge_results.py -f pdf_list.txt
  485. 3. 合并指定处理器的结果:
  486. python batch_merge_results.py -p paddleocr_vl_single_process -p mineru_vllm
  487. 4. 自定义参数:
  488. python batch_merge_results.py -w 20 -t 90
  489. 5. 模拟运行(不实际执行):
  490. python batch_merge_results.py --dry-run
  491. """
  492. )
  493. # 配置文件
  494. parser.add_argument(
  495. '-c', '--config',
  496. default='processor_configs.yaml',
  497. help='配置文件路径 (默认: processor_configs.yaml)'
  498. )
  499. # PDF 和处理器
  500. parser.add_argument(
  501. '-d', '--base-dir',
  502. help='PDF 基础目录(覆盖配置文件)'
  503. )
  504. parser.add_argument(
  505. '-f', '--file-list',
  506. help='PDF 列表文件(每行一个 PDF 名称,不含扩展名)'
  507. )
  508. parser.add_argument(
  509. '-l', '--pdf-list',
  510. nargs='+',
  511. help='PDF 名称列表(不含扩展名)'
  512. )
  513. parser.add_argument(
  514. '-p', '--processors',
  515. nargs='+',
  516. help='处理器列表(不指定则自动检测所有 VL 处理器)'
  517. )
  518. # 合并参数
  519. parser.add_argument(
  520. '-w', '--window',
  521. type=int,
  522. default=15,
  523. help='查找窗口大小 (默认: 15)'
  524. )
  525. parser.add_argument(
  526. '-t', '--threshold',
  527. type=int,
  528. default=85,
  529. help='相似度阈值 (默认: 85)'
  530. )
  531. parser.add_argument(
  532. '--output-type',
  533. choices=['json', 'markdown', 'both'],
  534. default='both',
  535. help='输出格式 (默认: both)'
  536. )
  537. # 工具选项
  538. parser.add_argument(
  539. '--dry-run',
  540. action='store_true',
  541. help='模拟运行,不实际执行'
  542. )
  543. parser.add_argument(
  544. '-v', '--verbose',
  545. action='store_true',
  546. help='详细输出'
  547. )
  548. return parser
  549. def main():
  550. """主函数"""
  551. parser = create_parser()
  552. args = parser.parse_args()
  553. # 设置日志级别
  554. if args.verbose:
  555. logging.getLogger().setLevel(logging.DEBUG)
  556. # 读取 PDF 列表
  557. pdf_list = None
  558. if args.file_list:
  559. pdf_list = []
  560. with open(args.file_list, 'r', encoding='utf-8') as f:
  561. for line in f:
  562. line = line.strip()
  563. if line and not line.startswith('#'):
  564. # 移除 .pdf 扩展名
  565. pdf_name = line.replace('.pdf', '')
  566. pdf_list.append(pdf_name)
  567. elif args.pdf_list:
  568. pdf_list = [p.replace('.pdf', '') for p in args.pdf_list]
  569. # 创建批量合并器
  570. merger = BatchMerger(
  571. config_file=args.config,
  572. base_dir=args.base_dir
  573. )
  574. # 执行批量合并
  575. stats = merger.batch_merge(
  576. pdf_list=pdf_list,
  577. processors=args.processors,
  578. window=args.window,
  579. threshold=args.threshold,
  580. output_type=args.output_type,
  581. dry_run=args.dry_run
  582. )
  583. return 0 if stats['failed'] == 0 else 1
  584. if __name__ == '__main__':
  585. print("🚀 启动批量OCR bbox 合并程序...")
  586. import sys
  587. if len(sys.argv) == 1:
  588. # 如果没有命令行参数,使用默认配置运行
  589. print("ℹ️ 未提供命令行参数,使用默认配置运行...")
  590. # 默认配置
  591. default_config = {
  592. "file-list": "pdf_list.txt",
  593. }
  594. print("⚙️ 默认参数:")
  595. for key, value in default_config.items():
  596. print(f" --{key}: {value}")
  597. # 构造参数
  598. sys.argv = [sys.argv[0]]
  599. for key, value in default_config.items():
  600. sys.argv.extend([f"--{key}", str(value)])
  601. sys.argv.append("--dry-run")
  602. sys.argv.append("--verbose") # 添加详细输出参数
  603. sys.exit(main())