# merge_mineru_paddle_ocr.py
  1. """
  2. 合并 MinerU 和 PaddleOCR 的结果
  3. 使用 MinerU 的表格结构识别 + PaddleOCR 的文字框坐标
  4. """
  5. import json
  6. import re
  7. import argparse
  8. from pathlib import Path
  9. from typing import List, Dict, Tuple, Optional
  10. from bs4 import BeautifulSoup
  11. from fuzzywuzzy import fuzz
  12. import shutil
class MinerUPaddleOCRMerger:
    """Merge the outputs of MinerU and PaddleOCR."""

    def __init__(self, look_ahead_window: int = 10, similarity_threshold: int = 90):
        """
        Args:
            look_ahead_window: size of the look-ahead window used when searching for matches
            similarity_threshold: text-similarity threshold (0-100)
        """
        self.look_ahead_window = look_ahead_window
        self.similarity_threshold = similarity_threshold
    def merge_table_with_bbox(self, mineru_json_path: str, paddle_json_path: str) -> List[Dict]:
        """
        Merge the MinerU and PaddleOCR results for one page.

        Args:
            mineru_json_path: path to the MinerU output JSON
            paddle_json_path: path to the PaddleOCR output JSON

        Returns:
            The merged list of content blocks.
        """
        # Load the data
        with open(mineru_json_path, 'r', encoding='utf-8') as f:
            mineru_data = json.load(f)
        with open(paddle_json_path, 'r', encoding='utf-8') as f:
            paddle_data = json.load(f)
        # Extract the PaddleOCR text boxes
        paddle_text_boxes = self._extract_paddle_text_boxes(paddle_data)
        # Process the MinerU data
        merged_data = self._process_mineru_data(mineru_data, paddle_text_boxes)
        return merged_data
    def _extract_paddle_text_boxes(self, paddle_data: Dict) -> List[Dict]:
        """Extract the text-box information from the PaddleOCR output."""
        text_boxes = []
        if 'overall_ocr_res' in paddle_data:
            ocr_res = paddle_data['overall_ocr_res']
            rec_texts = ocr_res.get('rec_texts', [])
            rec_polys = ocr_res.get('rec_polys', [])
            rec_scores = ocr_res.get('rec_scores', [])
            for i, (text, poly, score) in enumerate(zip(rec_texts, rec_polys, rec_scores)):
                if text and text.strip():
                    # Compute the bbox (x_min, y_min, x_max, y_max)
                    xs = [p[0] for p in poly]
                    ys = [p[1] for p in poly]
                    bbox = [min(xs), min(ys), max(xs), max(ys)]
                    text_boxes.append({
                        'text': text,
                        'bbox': bbox,
                        'poly': poly,
                        'score': score,
                        'paddle_bbox_index': i,
                        'used': False  # whether this box has already been matched
                    })
        return text_boxes
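    # Illustrative sketch of the PaddleOCR payload consumed above (shape inferred from the
    # keys this file reads, not from any official schema): 'overall_ocr_res' carries parallel
    # lists, roughly
    #     {"overall_ocr_res": {"rec_texts":  ["Balance", "-741.00"],
    #                          "rec_polys":  [[[10, 20], [90, 20], [90, 40], [10, 40]], ...],
    #                          "rec_scores": [0.99, 0.97]}}
    # Each 4-point polygon is collapsed into an axis-aligned bbox [x_min, y_min, x_max, y_max].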
    def _process_mineru_data(self, mineru_data: List[Dict],
                             paddle_text_boxes: List[Dict]) -> List[Dict]:
        """Process the MinerU data and attach bbox information.

        Args:
            mineru_data: content blocks from the MinerU JSON
            paddle_text_boxes: text boxes extracted from the PaddleOCR output

        Returns:
            The merged list of content blocks.
        """
        merged_data = []
        cells = None  # table-cell records for the current table
        paddle_pointer = 0  # pointer into the PaddleOCR text boxes
        last_matched_index = 0  # index of the last successful match
        # Sort mineru_data by bbox top-to-bottom, then left-to-right, so the order stays consistent
        mineru_data.sort(key=lambda x: (x['bbox'][1], x['bbox'][0]) if 'bbox' in x else (float('inf'), float('inf')))
        for item in mineru_data:
            if item['type'] == 'table':
                # Handle tables
                merged_item = item.copy()
                table_html = item.get('table_body', '')
                # Parse the HTML table and add bbox attributes
                enhanced_html, cells, paddle_pointer = self._enhance_table_html_with_bbox(
                    table_html, paddle_text_boxes, paddle_pointer
                )
                merged_item['table_body'] = enhanced_html
                merged_item['table_body_with_bbox'] = enhanced_html
                merged_item['bbox_mapping'] = 'merged_from_paddle_ocr'
                merged_item['table_cells'] = cells if cells else []
                merged_data.append(merged_item)
            elif item['type'] in ['text', 'header']:
                # Handle plain text and headers
                merged_item = item.copy()
                text = item.get('text', '')
                # Look up a matching bbox
                matched_bbox, paddle_pointer, last_matched_index = self._find_matching_bbox(
                    text, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                if matched_bbox:
                    merged_item['bbox'] = matched_bbox['bbox']
                    merged_item['bbox_source'] = 'paddle_ocr'
                    merged_item['text_score'] = matched_bbox['score']
                    # Mark the box as used
                    matched_bbox['used'] = True
                merged_data.append(merged_item)
            else:
                # Copy other block types unchanged
                merged_data.append(item.copy())
        return merged_data
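    # Illustrative sketch of the MinerU blocks handled above (shape inferred from the keys
    # accessed in this file; values hypothetical):
    #     {"type": "text",  "text": "Account summary",          "bbox": [35, 120, 480, 145]}
    #     {"type": "table", "table_body": "<table>...</table>", "bbox": [30, 200, 560, 620]}
    # Text/header blocks gain 'bbox', 'bbox_source' and 'text_score' when a PaddleOCR box is
    # matched; table blocks gain 'table_body_with_bbox' and 'table_cells'.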
    def _enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                      start_pointer: int) -> Tuple[str, List[Dict], int]:
        """
        Add bbox information to an HTML table.

        Args:
            html: the original HTML table
            paddle_text_boxes: PaddleOCR text boxes
            start_pointer: starting pointer position

        Returns:
            (enhanced HTML, list of cell records, new pointer position)
        """
        # MinerU may split into two adjacent cells what PaddleOCR recognises as one,
        # e.g. MinerU "-741.00 | 357,259.63" vs. PaddleOCR "-741.00357,259.63".
        soup = BeautifulSoup(html, 'html.parser')
        current_pointer = start_pointer
        last_matched_index = start_pointer
        cells = []  # bbox records for the matched cells
        # Iterate over all rows
        for row_idx, row in enumerate(soup.find_all('tr')):
            # Iterate over all cells in the row
            for col_idx, cell in enumerate(row.find_all(['td', 'th'])):
                cell_text = cell.get_text(strip=True)
                if not cell_text:
                    continue
                # Look up a matching bbox
                matched_bbox, current_pointer, last_matched_index = self._find_matching_bbox(
                    cell_text, paddle_text_boxes, current_pointer, last_matched_index
                )
                if matched_bbox:
                    # Add data-* attributes to the cell
                    bbox = matched_bbox['bbox']
                    cell['data-bbox'] = f"[{bbox[0]},{bbox[1]},{bbox[2]},{bbox[3]}]"
                    cell['data-score'] = f"{matched_bbox['score']:.4f}"
                    cell['data-paddle-index'] = str(matched_bbox['paddle_bbox_index'])
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'bbox': bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_bbox['score'],
                        'paddle_bbox_index': matched_bbox['paddle_bbox_index']
                    })
                    # Mark the box as used
                    matched_bbox['used'] = True
        return str(soup), cells, current_pointer
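    # Example of the enhancement performed above (attribute values hypothetical): a cell like
    #     <td>-741.00</td>
    # becomes
    #     <td data-bbox="[101,322,188,345]" data-score="0.9876" data-paddle-index="42">-741.00</td>
    # and a matching record is appended to the returned `cells` list.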
    def _find_matching_bbox(self, target_text: str, text_boxes: List[Dict],
                            start_index: int, last_match_index: int) -> Tuple[Optional[Dict], int, int]:
        """
        Find the text box that matches the target text.

        Args:
            target_text: the text to match
            text_boxes: list of text boxes
            start_index: starting index, one past the last box marked used=True
            last_match_index: index of the last successful match, which may be smaller than start_index

        Returns:
            (matched text box or None, new pointer position, last_match_index)
        """
        target_text = self._normalize_text(target_text)
        # Skip targets that are too short
        if len(target_text) < 2:
            return None, start_index, last_match_index
        # MinerU and PaddleOCR mostly agree on reading order, but not always, so scan backwards
        # for unused boxes; both engines can also mis-recognise text, so a look_ahead_window
        # is used to avoid missing matches.
        search_start = last_match_index - 1
        unused_count = 0
        while search_start >= 0:
            if not text_boxes[search_start]['used']:
                unused_count += 1
                if unused_count >= self.look_ahead_window:
                    break
            search_start -= 1
        if search_start < 0:
            search_start = 0
        search_end = min(start_index + self.look_ahead_window, len(text_boxes))
        best_match = None
        best_index = start_index
        for i in range(search_start, search_end):
            if text_boxes[i]['used']:
                continue
            box_text = self._normalize_text(text_boxes[i]['text'])
            # Skip candidates that are too short (avoids single-character matches)
            if len(box_text) < 2:
                continue
            # Length-ratio check: avoid matches whose lengths differ too much
            length_ratio = min(len(target_text), len(box_text)) / max(len(target_text), len(box_text))
            if length_ratio < 0.3:  # skip if the lengths differ by more than 70%
                continue
            # Exact matches take priority
            if target_text == box_text:
                if i >= start_index:
                    return text_boxes[i], i + 1, i
                else:
                    return text_boxes[i], start_index, i
            # Compute several similarity scores
            partial_ratio = fuzz.partial_ratio(target_text, box_text)
            token_sort_ratio = fuzz.token_sort_ratio(target_text, box_text)
            # Combined similarity: both scores must clear their thresholds
            if (partial_ratio >= self.similarity_threshold and
                    token_sort_ratio >= 50):  # the token_sort threshold is deliberately lower
                return text_boxes[i], start_index, last_match_index
        return best_match, best_index, last_match_index
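    # Worked example of the search window above, with the default look_ahead_window of 10:
    # for last_match_index=25 and start_index=30, the backward scan starts at index 24 and
    # walks back until it has passed 10 unused boxes (or reaches 0), while
    # search_end = min(30 + 10, len(text_boxes)); every unused box in
    # [search_start, search_end) is then tried for an exact or fuzzy match.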
    def _normalize_text(self, text: str) -> str:
        """Normalize text (strip whitespace, convert full-width characters, lowercase)."""
        # Remove all whitespace characters
        text = re.sub(r'\s+', '', text)
        # Convert full-width digits, letters and punctuation to half-width
        text = self._full_to_half(text)
        return text.lower()

    def _full_to_half(self, text: str) -> str:
        """Convert full-width characters to half-width."""
        result = []
        for char in text:
            code = ord(char)
            if code == 0x3000:  # full-width space
                code = 0x0020
            elif 0xFF01 <= code <= 0xFF5E:  # full-width ASCII range
                code -= 0xFEE0
            result.append(chr(code))
        return ''.join(result)
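    # Worked example of the normalization above: "１２３ ＡＢＣ" first loses its whitespace,
    # then the full-width characters U+FF11..U+FF13 and U+FF21..U+FF23 (all inside the
    # 0xFF01-0xFF5E range) shift down by 0xFEE0 to "123ABC", and lowercasing yields "123abc".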
    def generate_enhanced_markdown(self, merged_data: List[Dict],
                                   output_path: Optional[str] = None,
                                   mineru_file: Optional[str] = None) -> str:
        """
        Generate enhanced Markdown with bbox information embedded as HTML comments.

        Args:
            merged_data: the merged data
            output_path: output path (optional)
            mineru_file: path of the source MinerU JSON, used to resolve image paths (optional)

        Returns:
            The Markdown content.
        """
        md_lines = []
        for item in merged_data:
            if item['type'] == 'header':
                text = item.get('text', '')
                bbox = item.get('bbox', [])
                md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"# {text}\n")
            elif item['type'] == 'text':
                text = item.get('text', '')
                bbox = item.get('bbox', [])
                if bbox:
                    md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"{text}\n")
            elif item['type'] == 'table':
                md_lines.append("<!-- table cells carry data-bbox attributes -->\n")
                md_lines.append(item.get('table_body_with_bbox', item.get('table_body', '')))
                md_lines.append("\n")
            elif item['type'] == 'image':
                img_path = item.get('img_path', '')
                # Copy the image referenced by the MinerU output into the output directory
                if img_path and mineru_file:
                    mineru_dir = Path(mineru_file).parent
                    img_full_path = mineru_dir / img_path
                    if img_full_path.exists() and output_path:
                        output_img_path = Path(output_path).parent / img_path
                        output_img_path.parent.mkdir(parents=True, exist_ok=True)
                        shutil.copy(img_full_path, output_img_path)
                bbox = item.get('bbox', [])
                if bbox:
                    md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"![Image]({img_path})\n")
        markdown_content = '\n'.join(md_lines)
        if output_path:
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)
        return markdown_content
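    # Sketch of the Markdown emitted above (bbox values hypothetical):
    #     <!-- bbox: [35, 120, 480, 145] -->
    #     # Account statement
    #
    #     <!-- table cells carry data-bbox attributes -->
    #     <table>...</table>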
    def extract_table_cells_with_bbox(self, merged_data: List[Dict]) -> List[Dict]:
        """
        Extract all table cells together with their bbox information.

        Returns:
            A list of cells, each containing text, bbox, row, col, etc.
        """
        cells = []
        for item in merged_data:
            if item['type'] != 'table':
                continue
            html = item.get('table_body_with_bbox', item.get('table_body', ''))
            soup = BeautifulSoup(html, 'html.parser')
            # Iterate over all rows
            for row_idx, row in enumerate(soup.find_all('tr')):
                # Iterate over all cells in the row
                for col_idx, cell in enumerate(row.find_all(['td', 'th'])):
                    cell_text = cell.get_text(strip=True)
                    bbox_str = cell.get('data-bbox', '')
                    if bbox_str:
                        try:
                            bbox = json.loads(bbox_str)
                            cells.append({
                                'text': cell_text,
                                'bbox': bbox,
                                'row': row_idx,
                                'col': col_idx,
                                'score': float(cell.get('data-score', 0)),
                                'paddle_index': int(cell.get('data-paddle-index', -1))
                            })
                        except (json.JSONDecodeError, ValueError):
                            pass
        return cells
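    # Example record returned above (values hypothetical):
    #     {"text": "-741.00", "bbox": [101, 322, 188, 345], "row": 3, "col": 5,
    #      "score": 0.9876, "paddle_index": 42}
    # Note that 'row'/'col' here are 0-based, whereas _enhance_table_html_with_bbox stores
    # 1-based indices in 'table_cells'.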
def merge_single_file(mineru_file: Path, paddle_file: Path, output_dir: Path,
                      output_format: str, merger: MinerUPaddleOCRMerger) -> bool:
    """
    Merge a single pair of files.

    Args:
        mineru_file: path to the MinerU JSON file
        paddle_file: path to the PaddleOCR JSON file
        output_dir: output directory
        output_format: 'json', 'markdown' or 'both'
        merger: merger instance

    Returns:
        True on success, False otherwise.
    """
    print(f"📄 Processing: {mineru_file.name}")
    # Output file paths
    merged_md_path = output_dir / f"{mineru_file.stem}.md"
    merged_json_path = output_dir / f"{mineru_file.stem}.json"
    try:
        # Merge the data
        merged_data = merger.merge_table_with_bbox(
            str(mineru_file),
            str(paddle_file)
        )
        # Generate Markdown
        if output_format in ['markdown', 'both']:
            merger.generate_enhanced_markdown(merged_data, str(merged_md_path), str(mineru_file))
        # Extract cell information (optional)
        # cells = merger.extract_table_cells_with_bbox(merged_data)
        if output_format in ['json', 'both']:
            with open(merged_json_path, 'w', encoding='utf-8') as f:
                json.dump(merged_data, f, ensure_ascii=False, indent=2)
        print("  ✅ Merge finished")
        print(f"  📊 Processed {len(merged_data)} blocks")
        print("  💾 Output files:")
        if output_format in ['markdown', 'both']:
            print(f"    - {merged_md_path.name}")
        if output_format in ['json', 'both']:
            print(f"    - {merged_json_path.name}")
        return True
    except Exception as e:
        print(f"  ❌ Processing failed: {e}")
        import traceback
        traceback.print_exc()
        return False
def merge_mineru_paddle_batch(mineru_dir: str, paddle_dir: str, output_dir: str,
                              output_format: str = 'both',
                              look_ahead_window: int = 10,
                              similarity_threshold: int = 80):
    """
    Merge MinerU and PaddleOCR results in batch.

    Args:
        mineru_dir: directory containing the MinerU results
        paddle_dir: directory containing the PaddleOCR results
        output_dir: output directory
        output_format: 'json', 'markdown' or 'both'
        look_ahead_window: size of the look-ahead window
        similarity_threshold: text-similarity threshold
    """
    mineru_path = Path(mineru_dir)
    paddle_path = Path(paddle_dir)
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    merger = MinerUPaddleOCRMerger(
        look_ahead_window=look_ahead_window,
        similarity_threshold=similarity_threshold
    )
    # Find all per-page MinerU JSON files
    mineru_files = list(mineru_path.glob('*_page_*[0-9].json'))
    mineru_files.sort()
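    # The glob above only picks up per-page outputs whose stem ends in a digit, e.g. a
    # (hypothetical) "statement_page_001.json"; derived files such as
    # "statement_page_001_table.json" are ignored.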
    print(f"\n🔍 Found {len(mineru_files)} MinerU files")
    print(f"📂 MinerU directory: {mineru_dir}")
    print(f"📂 PaddleOCR directory: {paddle_dir}")
    print(f"📂 Output directory: {output_dir}")
    print(f"⚙️ Look-ahead window: {look_ahead_window}")
    print(f"⚙️ Similarity threshold: {similarity_threshold}%\n")
    success_count = 0
    failed_count = 0
    for mineru_file in mineru_files:
        # Find the matching PaddleOCR file
        paddle_file = paddle_path / mineru_file.name
        if not paddle_file.exists():
            print(f"⚠️ Skipped: no matching PaddleOCR file: {paddle_file.name}\n")
            failed_count += 1
            continue
        if merge_single_file(mineru_file, paddle_file, output_path, output_format, merger):
            success_count += 1
        else:
            failed_count += 1
        print()  # blank line between files
    # Print summary statistics
    print("=" * 60)
    print("✅ Done!")
    print("📊 Summary:")
    print(f"  - Total files: {len(mineru_files)}")
    print(f"  - Succeeded: {success_count}")
    print(f"  - Failed: {failed_count}")
    print("=" * 60)
def main():
    """Command-line entry point."""
    parser = argparse.ArgumentParser(
        description='Merge MinerU and PaddleOCR recognition results and attach bbox coordinates',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  1. Batch-process a whole directory:
     python merge_mineru_paddle_ocr.py \\
         --mineru-dir /path/to/mineru/results \\
         --paddle-dir /path/to/paddle/results \\
         --output-dir /path/to/output
  2. Process a single file:
     python merge_mineru_paddle_ocr.py \\
         --mineru-file /path/to/file_page_001.json \\
         --paddle-file /path/to/file_page_001.json \\
         --output-dir /path/to/output
  3. Custom parameters:
     python merge_mineru_paddle_ocr.py \\
         --mineru-dir /path/to/mineru \\
         --paddle-dir /path/to/paddle \\
         --output-dir /path/to/output \\
         --window 15 \\
         --threshold 85
"""
    )
    # File / directory arguments
    file_group = parser.add_argument_group('file arguments')
    file_group.add_argument(
        '--mineru-file',
        type=str,
        help='path to a MinerU output JSON file (single-file mode)'
    )
    file_group.add_argument(
        '--paddle-file',
        type=str,
        help='path to a PaddleOCR output JSON file (single-file mode)'
    )
    dir_group = parser.add_argument_group('directory arguments')
    dir_group.add_argument(
        '--mineru-dir',
        type=str,
        help='directory containing the MinerU results (batch mode)'
    )
    dir_group.add_argument(
        '--paddle-dir',
        type=str,
        help='directory containing the PaddleOCR results (batch mode)'
    )
    # Output arguments
    output_group = parser.add_argument_group('output arguments')
    output_group.add_argument(
        '-o', '--output-dir',
        type=str,
        required=True,
        help='output directory (required)'
    )
    output_group.add_argument(
        '-f', '--format',
        choices=['json', 'markdown', 'both'],
        default='both',
        help='output format'
    )
    # Algorithm arguments
    algo_group = parser.add_argument_group('algorithm arguments')
    algo_group.add_argument(
        '-w', '--window',
        type=int,
        default=15,
        help='look-ahead window size (default: 15)'
    )
    algo_group.add_argument(
        '-t', '--threshold',
        type=int,
        default=85,
        help='text-similarity threshold, 0-100 (default: 85)'
    )
    args = parser.parse_args()
    output_format = args.format.lower()
    # Validate the arguments
    if args.mineru_file and args.paddle_file:
        # Single-file mode
        mineru_file = Path(args.mineru_file)
        paddle_file = Path(args.paddle_file)
        output_dir = Path(args.output_dir)
        if not mineru_file.exists():
            print(f"❌ Error: MinerU file does not exist: {mineru_file}")
            return
        if not paddle_file.exists():
            print(f"❌ Error: PaddleOCR file does not exist: {paddle_file}")
            return
        output_dir.mkdir(parents=True, exist_ok=True)
        print("\n🔧 Single-file mode")
        print(f"📄 MinerU file: {mineru_file}")
        print(f"📄 PaddleOCR file: {paddle_file}")
        print(f"📂 Output directory: {output_dir}")
        print(f"⚙️ Look-ahead window: {args.window}")
        print(f"⚙️ Similarity threshold: {args.threshold}%\n")
        merger = MinerUPaddleOCRMerger(
            look_ahead_window=args.window,
            similarity_threshold=args.threshold
        )
        success = merge_single_file(mineru_file, paddle_file, output_dir, output_format, merger)
        if success:
            print("\n✅ Done!")
        else:
            print("\n❌ Failed!")
    elif args.mineru_dir and args.paddle_dir:
        # Batch mode
        if not Path(args.mineru_dir).exists():
            print(f"❌ Error: MinerU directory does not exist: {args.mineru_dir}")
            return
        if not Path(args.paddle_dir).exists():
            print(f"❌ Error: PaddleOCR directory does not exist: {args.paddle_dir}")
            return
        print("\n🔧 Batch mode")
        merge_mineru_paddle_batch(
            args.mineru_dir,
            args.paddle_dir,
            args.output_dir,
            output_format=output_format,
            look_ahead_window=args.window,
            similarity_threshold=args.threshold
        )
    else:
        parser.print_help()
        print("\n❌ Error: please supply either single-file or batch-mode arguments")
        print("   Single-file mode: --mineru-file and --paddle-file")
        print("   Batch mode:       --mineru-dir and --paddle-dir")
if __name__ == "__main__":
    print("🚀 Starting the MinerU + PaddleOCR merge tool...")
    import sys
    if len(sys.argv) == 1:
        # No command-line arguments: fall back to the default configuration
        print("ℹ️ No command-line arguments given, running with the default configuration...")
        # Default configuration
        default_config = {
            "mineru-file": "/Users/zhch158/workspace/data/流水分析/B用户_扫描流水/mineru-vlm-2.5.3_Results/B用户_扫描流水_page_001.json",
            "paddle-file": "/Users/zhch158/workspace/data/流水分析/B用户_扫描流水/data_PPStructureV3_Results/B用户_扫描流水_page_001.json",
            "output-dir": "/Users/zhch158/workspace/data/流水分析/B用户_扫描流水/merged_results",
            # "mineru-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/mineru-vlm-2.5.3_Results",
            # "paddle-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/data_PPStructureV3_Results",
            # "output-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/merged_results",
            "format": "both",
            "window": "15",
            "threshold": "85"
        }
        print("⚙️ Default arguments:")
        for key, value in default_config.items():
            print(f"  --{key}: {value}")
        # Rebuild sys.argv from the default configuration
        sys.argv = [sys.argv[0]]
        for key, value in default_config.items():
            sys.argv.extend([f"--{key}", str(value)])
    sys.exit(main())
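# Programmatic usage sketch (paths hypothetical; this mirrors what merge_single_file does above):
#
#     from merge_mineru_paddle_ocr import MinerUPaddleOCRMerger
#
#     merger = MinerUPaddleOCRMerger(look_ahead_window=15, similarity_threshold=85)
#     merged = merger.merge_table_with_bbox("page_001.mineru.json", "page_001.paddle.json")
#     merger.generate_enhanced_markdown(merged, output_path="page_001.md",
#                                       mineru_file="page_001.mineru.json")
#     cells = merger.extract_table_cells_with_bbox(merged)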