# merge_mineru_paddle_ocr.py

  1. """
  2. 合并 MinerU 和 PaddleOCR 的结果
  3. 使用 MinerU 的表格结构识别 + PaddleOCR 的文字框坐标
  4. """
  5. import json
  6. import re
  7. import argparse
  8. from pathlib import Path
  9. from typing import List, Dict, Tuple, Optional
  10. from bs4 import BeautifulSoup
  11. from fuzzywuzzy import fuzz
class MinerUPaddleOCRMerger:
    """Merge MinerU and PaddleOCR results."""

    def __init__(self, look_ahead_window: int = 10, similarity_threshold: int = 90):
        """
        Args:
            look_ahead_window: number of text boxes to look ahead when matching
            similarity_threshold: text similarity threshold (0-100)
        """
        self.look_ahead_window = look_ahead_window
        self.similarity_threshold = similarity_threshold

    def merge_table_with_bbox(self, mineru_json_path: str, paddle_json_path: str) -> List[Dict]:
        """
        Merge MinerU and PaddleOCR results.

        Args:
            mineru_json_path: path to the MinerU output JSON
            paddle_json_path: path to the PaddleOCR output JSON

        Returns:
            The merged result list.
        """
        # Load both inputs
        with open(mineru_json_path, 'r', encoding='utf-8') as f:
            mineru_data = json.load(f)
        with open(paddle_json_path, 'r', encoding='utf-8') as f:
            paddle_data = json.load(f)
        # Extract the PaddleOCR text boxes
        paddle_text_boxes = self._extract_paddle_text_boxes(paddle_data)
        # Walk the MinerU blocks and attach bbox information
        merged_data = self._process_mineru_data(mineru_data, paddle_text_boxes)
        return merged_data

    def _extract_paddle_text_boxes(self, paddle_data: Dict) -> List[Dict]:
        """Extract the text boxes from the PaddleOCR result."""
        text_boxes = []
        if 'overall_ocr_res' in paddle_data:
            ocr_res = paddle_data['overall_ocr_res']
            rec_texts = ocr_res.get('rec_texts', [])
            rec_polys = ocr_res.get('rec_polys', [])
            rec_scores = ocr_res.get('rec_scores', [])
            for i, (text, poly, score) in enumerate(zip(rec_texts, rec_polys, rec_scores)):
                if text and text.strip():
                    # Compute the bbox as (x_min, y_min, x_max, y_max)
                    xs = [p[0] for p in poly]
                    ys = [p[1] for p in poly]
                    bbox = [min(xs), min(ys), max(xs), max(ys)]
                    text_boxes.append({
                        'text': text,
                        'bbox': bbox,
                        'poly': poly,
                        'score': score,
                        'paddle_bbox_index': i,
                        'used': False  # whether this box has already been matched
                    })
        return text_boxes

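    # Expected input shape (a sketch of just the keys read above; the real
    # PaddleOCR / PP-StructureV3 result carries additional fields):
    #
    #   {
    #     "overall_ocr_res": {
    #       "rec_texts":  ["...", "1,234.56", ...],
    #       "rec_polys":  [[[x1, y1], [x2, y2], [x3, y3], [x4, y4]], ...],
    #       "rec_scores": [0.998, 0.973, ...]
    #     }
    #   }
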
    def _process_mineru_data(self, mineru_data: List[Dict],
                             paddle_text_boxes: List[Dict]) -> List[Dict]:
        """Process the MinerU data and attach bbox information."""
        merged_data = []
        all_cells = []      # table-cell records collected from every table
        paddle_pointer = 0  # pointer into the PaddleOCR text boxes
        # Sort the MinerU blocks by bbox, top-to-bottom then left-to-right,
        # so the reading order matches the PaddleOCR box order.
        mineru_data.sort(key=lambda x: (x['bbox'][1], x['bbox'][0]) if 'bbox' in x else (float('inf'), float('inf')))
        for item in mineru_data:
            if item['type'] == 'table':
                # Tables: parse the HTML and attach a bbox to every cell
                merged_item = item.copy()
                table_html = item.get('table_body', '')
                enhanced_html, cells, paddle_pointer = self._enhance_table_html_with_bbox(
                    table_html, paddle_text_boxes, paddle_pointer
                )
                merged_item['table_body'] = enhanced_html
                merged_item['table_body_with_bbox'] = enhanced_html
                merged_item['bbox_mapping'] = 'merged_from_paddle_ocr'
                merged_data.append(merged_item)
                all_cells.extend(cells)
            elif item['type'] in ['text', 'header']:
                # Plain text: look up a matching bbox
                merged_item = item.copy()
                text = item.get('text', '')
                matched_bbox, paddle_pointer = self._find_matching_bbox(
                    text, paddle_text_boxes, paddle_pointer
                )
                if matched_bbox:
                    merged_item['bbox'] = matched_bbox['bbox']
                    merged_item['bbox_source'] = 'paddle_ocr'
                    merged_item['text_score'] = matched_bbox['score']
                    # Mark the box as used
                    matched_bbox['used'] = True
                merged_data.append(merged_item)
            else:
                # Other block types are copied unchanged
                merged_data.append(item.copy())
        # Append the per-cell records from all tables as standalone items
        if all_cells:
            merged_data.extend(all_cells)
        return merged_data

    def _enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                      start_pointer: int) -> Tuple[str, List[Dict], int]:
        """
        Attach bbox information to an HTML table.

        Args:
            html: the original HTML table
            paddle_text_boxes: list of PaddleOCR text boxes
            start_pointer: starting pointer position

        Returns:
            (enhanced HTML, list of cell records, new pointer position)
        """
        soup = BeautifulSoup(html, 'html.parser')
        current_pointer = start_pointer
        cells = []  # per-cell bbox records
        # Walk every row
        for row_idx, row in enumerate(soup.find_all('tr')):
            # Walk every cell in the row
            for col_idx, cell in enumerate(row.find_all(['td', 'th'])):
                cell_text = cell.get_text(strip=True)
                if not cell_text:
                    continue
                # Look up a matching bbox
                matched_bbox, current_pointer = self._find_matching_bbox(
                    cell_text, paddle_text_boxes, current_pointer
                )
                if matched_bbox:
                    # Add data-* attributes to the cell
                    bbox = matched_bbox['bbox']
                    cell['data-bbox'] = f"[{bbox[0]},{bbox[1]},{bbox[2]},{bbox[3]}]"
                    cell['data-score'] = f"{matched_bbox['score']:.4f}"
                    cell['data-paddle-index'] = str(matched_bbox['paddle_bbox_index'])
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'bbox': bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_bbox['score'],
                        'paddle_bbox_index': matched_bbox['paddle_bbox_index']
                    })
                    # Mark the box as used
                    matched_bbox['used'] = True
        return str(soup), cells, current_pointer

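    # After enhancement a matched cell carries its bbox as data-* attributes,
    # e.g. (values illustrative):
    #
    #   <td data-bbox="[105,88,240,112]" data-score="0.9976" data-paddle-index="12">1,234.56</td>
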
    def _find_matching_bbox(self, target_text: str, text_boxes: List[Dict],
                            start_index: int) -> Tuple[Optional[Dict], int]:
        """
        Find the text box that matches the target text.

        Args:
            target_text: text to match
            text_boxes: list of text boxes
            start_index: starting index

        Returns:
            (matched text box or None, new pointer position)
        """
        target_text = self._normalize_text(target_text)
        # Search window: from one third of the window before start_index
        # up to start_index + look_ahead_window.
        search_start = max(0, int(start_index - self.look_ahead_window / 3))
        search_end = min(start_index + self.look_ahead_window, len(text_boxes))
        for i in range(search_start, search_end):
            if text_boxes[i]['used']:
                continue
            box_text = self._normalize_text(text_boxes[i]['text'])
            # Exact match wins immediately
            if target_text == box_text:
                return text_boxes[i], i + 1
            # Otherwise accept the first box whose similarity clears the
            # threshold; no search for the globally best match.
            similarity = fuzz.partial_ratio(target_text, box_text)
            if similarity >= self.similarity_threshold:
                return text_boxes[i], i + 1
        # No acceptable match; leave the pointer where it was.
        return None, start_index

    def _normalize_text(self, text: str) -> str:
        """Normalize text (strip whitespace, unify character width, lowercase)."""
        # Remove all whitespace
        text = re.sub(r'\s+', '', text)
        # Convert full-width digits and letters to half-width
        text = self._full_to_half(text)
        return text.lower()

    def _full_to_half(self, text: str) -> str:
        """Convert full-width characters to half-width."""
        result = []
        for char in text:
            code = ord(char)
            if code == 0x3000:  # full-width space
                code = 0x0020
            elif 0xFF01 <= code <= 0xFF5E:  # full-width ASCII range
                code -= 0xFEE0
            result.append(chr(code))
        return ''.join(result)

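    # Normalization example (illustrative): "１２３ ４５６" first has its whitespace
    # stripped, then the full-width digits are mapped into the ASCII range, so it
    # normalizes to "123456" and can hit the exact-match branch in
    # _find_matching_bbox even when PaddleOCR returned half-width text; near
    # misses fall back to fuzz.partial_ratio against similarity_threshold.
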
    def generate_enhanced_markdown(self, merged_data: List[Dict],
                                   output_path: Optional[str] = None) -> str:
        """
        Generate enhanced Markdown (with bbox information in HTML comments).

        Args:
            merged_data: merged data
            output_path: output path (optional)

        Returns:
            The Markdown content.
        """
        md_lines = []
        for item in merged_data:
            if item['type'] == 'header':
                text = item.get('text', '')
                bbox = item.get('bbox', [])
                md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"# {text}\n")
            elif item['type'] == 'text':
                text = item.get('text', '')
                bbox = item.get('bbox', [])
                if bbox:
                    md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"{text}\n")
            elif item['type'] == 'table':
                md_lines.append("<!-- table cells carry data-bbox attributes -->\n")
                md_lines.append(item.get('table_body_with_bbox', item.get('table_body', '')))
                md_lines.append("\n")
            elif item['type'] == 'image':
                img_path = item.get('img_path', '')
                bbox = item.get('bbox', [])
                if bbox:
                    md_lines.append(f"<!-- bbox: {bbox} -->")
                md_lines.append(f"![Image]({img_path})\n")
        markdown_content = '\n'.join(md_lines)
        if output_path:
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)
        return markdown_content

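    # Output sketch (values illustrative): every text/header/image block is
    # preceded by an HTML comment carrying its bbox, and tables are embedded
    # as the enhanced HTML built above:
    #
    #   <!-- bbox: [72, 118, 520, 142] -->
    #   # Account statement
    #
    #   <!-- table cells carry data-bbox attributes -->
    #   <table>...<td data-bbox="[105,88,240,112]" ...>...</td>...</table>
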
    def extract_table_cells_with_bbox(self, merged_data: List[Dict]) -> List[Dict]:
        """
        Extract every table cell and its bbox information.

        Returns:
            A list of cells, each with text, bbox, row, col, etc.
        """
        cells = []
        for item in merged_data:
            if item['type'] != 'table':
                continue
            html = item.get('table_body_with_bbox', item.get('table_body', ''))
            soup = BeautifulSoup(html, 'html.parser')
            # Walk every row
            for row_idx, row in enumerate(soup.find_all('tr')):
                # Walk every cell in the row
                for col_idx, cell in enumerate(row.find_all(['td', 'th'])):
                    cell_text = cell.get_text(strip=True)
                    bbox_str = cell.get('data-bbox', '')
                    if bbox_str:
                        try:
                            bbox = json.loads(bbox_str)
                            cells.append({
                                'text': cell_text,
                                'bbox': bbox,
                                'row': row_idx,
                                'col': col_idx,
                                'score': float(cell.get('data-score', 0)),
                                'paddle_index': int(cell.get('data-paddle-index', -1))
                            })
                        except (json.JSONDecodeError, ValueError):
                            pass
        return cells

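# Programmatic usage (a minimal sketch; the file names are placeholders):
#
#   merger = MinerUPaddleOCRMerger(look_ahead_window=15, similarity_threshold=85)
#   merged = merger.merge_table_with_bbox("page_001_mineru.json", "page_001_paddle.json")
#   merger.generate_enhanced_markdown(merged, "page_001.md")
#   cells = merger.extract_table_cells_with_bbox(merged)

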
def merge_single_file(mineru_file: Path, paddle_file: Path, output_dir: Path,
                      output_format: str, merger: MinerUPaddleOCRMerger) -> bool:
    """
    Merge a single pair of files.

    Args:
        mineru_file: path to the MinerU JSON file
        paddle_file: path to the PaddleOCR JSON file
        output_dir: output directory
        output_format: 'json', 'markdown' or 'both'
        merger: merger instance

    Returns:
        True on success.
    """
    print(f"📄 Processing: {mineru_file.name}")
    # Output file paths
    merged_md_path = output_dir / f"{mineru_file.stem}.md"
    merged_json_path = output_dir / f"{mineru_file.stem}.json"
    try:
        # Merge the data
        merged_data = merger.merge_table_with_bbox(
            str(mineru_file),
            str(paddle_file)
        )
        # Generate Markdown
        if output_format in ['markdown', 'both']:
            merger.generate_enhanced_markdown(merged_data, str(merged_md_path))
        # Extract cell information
        # cells = merger.extract_table_cells_with_bbox(merged_data)
        if output_format in ['json', 'both']:
            with open(merged_json_path, 'w', encoding='utf-8') as f:
                json.dump(merged_data, f, ensure_ascii=False, indent=2)
        print("   ✅ Merge complete")
        print(f"   📊 Processed {len(merged_data)} objects")
        print("   💾 Output files:")
        print(f"      - {merged_json_path.name}")
        return True
    except Exception as e:
        print(f"   ❌ Processing failed: {e}")
        import traceback
        traceback.print_exc()
        return False


def merge_mineru_paddle_batch(mineru_dir: str, paddle_dir: str, output_dir: str, output_format: str = 'both',
                              look_ahead_window: int = 10,
                              similarity_threshold: int = 80):
    """
    Merge MinerU and PaddleOCR results in batch.

    Args:
        mineru_dir: directory with MinerU results
        paddle_dir: directory with PaddleOCR results
        output_dir: output directory
        output_format: 'json', 'markdown' or 'both'
        look_ahead_window: look-ahead window size
        similarity_threshold: similarity threshold
    """
    mineru_path = Path(mineru_dir)
    paddle_path = Path(paddle_dir)
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    merger = MinerUPaddleOCRMerger(
        look_ahead_window=look_ahead_window,
        similarity_threshold=similarity_threshold
    )
    # Find all MinerU JSON files
    mineru_files = list(mineru_path.glob('*_page_*[0-9].json'))
    mineru_files.sort()
    print(f"\n🔍 Found {len(mineru_files)} MinerU files")
    print(f"📂 MinerU directory: {mineru_dir}")
    print(f"📂 PaddleOCR directory: {paddle_dir}")
    print(f"📂 Output directory: {output_dir}")
    print(f"⚙️ Look-ahead window: {look_ahead_window}")
    print(f"⚙️ Similarity threshold: {similarity_threshold}%\n")
    success_count = 0
    failed_count = 0
    for mineru_file in mineru_files:
        # Find the matching PaddleOCR file
        paddle_file = paddle_path / mineru_file.name
        if not paddle_file.exists():
            print(f"⚠️ Skipping: no matching PaddleOCR file: {paddle_file.name}\n")
            failed_count += 1
            continue
        if merge_single_file(mineru_file, paddle_file, output_path, output_format, merger):
            success_count += 1
        else:
            failed_count += 1
        print()  # blank line between files
    # Print summary statistics
    print("=" * 60)
    print("✅ Done!")
    print("📊 Statistics:")
    print(f"   - Total files: {len(mineru_files)}")
    print(f"   - Succeeded: {success_count}")
    print(f"   - Failed: {failed_count}")
    print("=" * 60)


def main():
    """Entry point."""
    parser = argparse.ArgumentParser(
        description='Merge MinerU and PaddleOCR recognition results and attach bbox coordinates',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  1. Batch-process a whole directory:
     python merge_mineru_paddle_ocr.py \\
         --mineru-dir /path/to/mineru/results \\
         --paddle-dir /path/to/paddle/results \\
         --output-dir /path/to/output

  2. Process a single file:
     python merge_mineru_paddle_ocr.py \\
         --mineru-file /path/to/file_page_001.json \\
         --paddle-file /path/to/file_page_001.json \\
         --output-dir /path/to/output

  3. Custom parameters:
     python merge_mineru_paddle_ocr.py \\
         --mineru-dir /path/to/mineru \\
         --paddle-dir /path/to/paddle \\
         --output-dir /path/to/output \\
         --window 15 \\
         --threshold 85
"""
    )
    # File / directory arguments
    file_group = parser.add_argument_group('file arguments')
    file_group.add_argument(
        '--mineru-file',
        type=str,
        help='path to a MinerU output JSON file (single-file mode)'
    )
    file_group.add_argument(
        '--paddle-file',
        type=str,
        help='path to a PaddleOCR output JSON file (single-file mode)'
    )
    dir_group = parser.add_argument_group('directory arguments')
    dir_group.add_argument(
        '--mineru-dir',
        type=str,
        help='directory with MinerU results (batch mode)'
    )
    dir_group.add_argument(
        '--paddle-dir',
        type=str,
        help='directory with PaddleOCR results (batch mode)'
    )
    # Output arguments
    output_group = parser.add_argument_group('output arguments')
    output_group.add_argument(
        '-o', '--output-dir',
        type=str,
        required=True,
        help='output directory (required)'
    )
    output_group.add_argument(
        '-f', '--format',
        choices=['json', 'markdown', 'both'],
        default='json', help='output format'
    )
    # Algorithm arguments
    algo_group = parser.add_argument_group('algorithm arguments')
    algo_group.add_argument(
        '-w', '--window',
        type=int,
        default=10,
        help='look-ahead window size (default: 10)'
    )
    algo_group.add_argument(
        '-t', '--threshold',
        type=int,
        default=80,
        help='text similarity threshold (0-100, default: 80)'
    )
    args = parser.parse_args()
    output_format = args.format.lower()
    # Validate the arguments
    if args.mineru_file and args.paddle_file:
        # Single-file mode
        mineru_file = Path(args.mineru_file)
        paddle_file = Path(args.paddle_file)
        output_dir = Path(args.output_dir)
        if not mineru_file.exists():
            print(f"❌ Error: MinerU file does not exist: {mineru_file}")
            return
        if not paddle_file.exists():
            print(f"❌ Error: PaddleOCR file does not exist: {paddle_file}")
            return
        output_dir.mkdir(parents=True, exist_ok=True)
        print("\n🔧 Single-file mode")
        print(f"📄 MinerU file: {mineru_file}")
        print(f"📄 PaddleOCR file: {paddle_file}")
        print(f"📂 Output directory: {output_dir}")
        print(f"⚙️ Look-ahead window: {args.window}")
        print(f"⚙️ Similarity threshold: {args.threshold}%\n")
        merger = MinerUPaddleOCRMerger(
            look_ahead_window=args.window,
            similarity_threshold=args.threshold
        )
        success = merge_single_file(mineru_file, paddle_file, output_dir, output_format, merger)
        if success:
            print("\n✅ Done!")
        else:
            print("\n❌ Failed!")
    elif args.mineru_dir and args.paddle_dir:
        # Batch mode
        if not Path(args.mineru_dir).exists():
            print(f"❌ Error: MinerU directory does not exist: {args.mineru_dir}")
            return
        if not Path(args.paddle_dir).exists():
            print(f"❌ Error: PaddleOCR directory does not exist: {args.paddle_dir}")
            return
        print("\n🔧 Batch mode")
        merge_mineru_paddle_batch(
            args.mineru_dir,
            args.paddle_dir,
            args.output_dir,
            output_format=output_format,
            look_ahead_window=args.window,
            similarity_threshold=args.threshold
        )
    else:
        parser.print_help()
        print("\n❌ Error: specify either single-file or batch-mode arguments")
        print("   Single-file mode: --mineru-file and --paddle-file")
        print("   Batch mode: --mineru-dir and --paddle-dir")


if __name__ == "__main__":
    print("🚀 Starting the MinerU + PaddleOCR merger...")
    import sys
    if len(sys.argv) == 1:
        # No command-line arguments: run with the default configuration
        print("ℹ️ No command-line arguments given, using the default configuration...")
        # Default configuration
        default_config = {
            "mineru-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/mineru-vlm-2.5.3_Results",
            "paddle-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/data_PPStructureV3_Results",
            "output-dir": "/Users/zhch158/workspace/data/流水分析/德_内蒙古银行照/merged_results",
            "format": "both",
            "window": "15",
            "threshold": "85"
        }
        print(f"📂 MinerU directory: {default_config['mineru-dir']}")
        print(f"📂 PaddleOCR directory: {default_config['paddle-dir']}")
        print(f"📂 Output directory: {default_config['output-dir']}")
        print(f"⚙️ Look-ahead window: {default_config['window']}")
        print(f"⚙️ Similarity threshold: {default_config['threshold']}%\n")
        # Build the argument vector
        sys.argv = [sys.argv[0]]
        for key, value in default_config.items():
            sys.argv.extend([f"--{key}", str(value)])
    sys.exit(main())