data_processor_v2.py

  1. """
  2. 数据处理模块
  3. 负责处理 MinerU/PaddleOCR_VL/DotsOCR 数据,添加 bbox 信息
  4. """
  5. from typing import List, Dict, Tuple, Optional
  6. from bs4 import BeautifulSoup
  7. try:
  8. from .text_matcher import TextMatcher
  9. from .bbox_extractor import BBoxExtractor
  10. except ImportError:
  11. from text_matcher import TextMatcher
  12. from bbox_extractor import BBoxExtractor
  13. class DataProcessor:
  14. """数据处理器"""
  15. def __init__(self, text_matcher: TextMatcher, look_ahead_window: int = 10):
  16. """
  17. Args:
  18. text_matcher: 文本匹配器
  19. look_ahead_window: 向前查找窗口
  20. """
  21. self.text_matcher = text_matcher
  22. self.look_ahead_window = look_ahead_window
    def process_mineru_data(self, mineru_data: List[Dict],
                            paddle_text_boxes: List[Dict]) -> List[Dict]:
        """
        Process MinerU data and attach bbox information.

        Args:
            mineru_data: MinerU data
            paddle_text_boxes: list of PaddleOCR text boxes

        Returns:
            Merged data. Table cells use the Paddle bboxes; other types only
            advance the pointer and keep the original MinerU bbox.
        """
        merged_data = []
        paddle_pointer = 0
        last_matched_index = 0

        # Sort by bbox (top-to-bottom, then left-to-right)
        mineru_data.sort(
            key=lambda x: (x['bbox'][1], x['bbox'][0])
            if 'bbox' in x else (float('inf'), float('inf'))
        )

        for item in mineru_data:
            item_type = item.get('type', '')
            if item_type == 'table':
                merged_item, paddle_pointer = self._process_table(
                    item, paddle_text_boxes, paddle_pointer
                )
                merged_data.append(merged_item)
            elif item_type in ['text', 'title']:
                merged_item, paddle_pointer, last_matched_index = self._process_text(
                    item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            elif item_type == 'list':
                merged_item, paddle_pointer, last_matched_index = self._process_list(
                    item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            else:
                merged_data.append(item.copy())

        return merged_data

    def process_dotsocr_data(self, dotsocr_data: List[Dict],
                             paddle_text_boxes: List[Dict]) -> List[Dict]:
        """
        🎯 Process DotsOCR data: convert to MinerU format and attach bbox information.

        Args:
            dotsocr_data: DotsOCR data
            paddle_text_boxes: list of PaddleOCR text boxes

        Returns:
            Merged data in MinerU format.
        """
        merged_data = []
        paddle_pointer = 0
        last_matched_index = 0

        # Sort by bbox (top-to-bottom, then left-to-right)
        dotsocr_data.sort(
            key=lambda x: (x['bbox'][1], x['bbox'][0])
            if 'bbox' in x else (float('inf'), float('inf'))
        )

        for item in dotsocr_data:
            # 🎯 Convert to MinerU format
            mineru_item = self._convert_dotsocr_to_mineru(item)
            category = mineru_item.get('type', '')

            # 🎯 Dispatch by type
            if category.lower() == 'table':
                merged_item, paddle_pointer = self._process_table(
                    mineru_item, paddle_text_boxes, paddle_pointer
                )
                merged_data.append(merged_item)
            elif category.lower() in ['text', 'title', 'header', 'footer']:
                merged_item, paddle_pointer, last_matched_index = self._process_text(
                    mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            elif category.lower() == 'list':
                merged_item, paddle_pointer, last_matched_index = self._process_list(
                    mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            else:
                # Page-header, Page-footer, Picture, etc.
                merged_data.append(mineru_item)

        return merged_data

    def _convert_dotsocr_to_mineru(self, dotsocr_item: Dict) -> Dict:
        """
        🎯 Convert a DotsOCR item to MinerU format.

        DotsOCR:
            {
                "category": "Table",
                "bbox": [x1, y1, x2, y2],
                "text": "..."
            }

        MinerU:
            {
                "type": "table",
                "bbox": [x1, y1, x2, y2],
                "table_body": "...",
                "page_idx": 0
            }
        """
        category = dotsocr_item.get('category', '')

        # 🎯 Category mapping
        category_map = {
            'Page-header': 'header',
            'Page-footer': 'footer',
            'Picture': 'image',
            'Figure': 'image',
            'Section-header': 'title',
            'Table': 'table',
            'Text': 'text',
            'Title': 'title',
            'List': 'list',
            'Caption': 'title'
        }
        mineru_type = category_map.get(category, 'text')

        # 🎯 Base conversion
        mineru_item = {
            'type': mineru_type,
            'bbox': dotsocr_item.get('bbox', []),
            'page_idx': 0  # DotsOCR is single-page by default
        }

        # 🎯 Text content
        text = dotsocr_item.get('text', '')
        if mineru_type == 'table':
            # Table: text -> table_body
            mineru_item['table_body'] = text
        else:
            # Other types: keep as text
            mineru_item['text'] = text

        # Heading level
        if category == 'Section-header':
            mineru_item['text_level'] = 1

        return mineru_item

    def process_paddleocr_vl_data(self, paddleocr_vl_data: Dict,
                                  paddle_text_boxes: List[Dict]) -> List[Dict]:
        """
        Process PaddleOCR_VL data and attach bbox information.

        Args:
            paddleocr_vl_data: PaddleOCR_VL data (JSON object)
            paddle_text_boxes: list of PaddleOCR text boxes

        Returns:
            🎯 Merged data in MinerU format (unified output format).
        """
        merged_data = []
        paddle_pointer = 0
        last_matched_index = 0

        # 🎯 Read the rotation angle and original image size
        rotation_angle = self._get_rotation_angle_from_vl(paddleocr_vl_data)
        orig_image_size = None
        if rotation_angle != 0:
            orig_image_size = self._get_original_image_size_from_vl(paddleocr_vl_data)
            print(f"🔄 PaddleOCR_VL rotation angle detected: {rotation_angle}°")
            print(f"📐 Original image size: {orig_image_size[0]} x {orig_image_size[1]}")

        # Extract parsing_res_list
        parsing_res_list = paddleocr_vl_data.get('parsing_res_list', [])

        # Sort by bbox (top-to-bottom, then left-to-right)
        parsing_res_list.sort(
            key=lambda x: (x['block_bbox'][1], x['block_bbox'][0])
            if 'block_bbox' in x else (float('inf'), float('inf'))
        )

        for item in parsing_res_list:
            # 🎯 Transform bbox coordinates first (if needed)
            if rotation_angle != 0 and orig_image_size:
                item = self._transform_vl_block_bbox(item, rotation_angle, orig_image_size)

            # 🎯 Convert to the unified MinerU format
            mineru_item = self._convert_paddleocr_vl_to_mineru(item)
            item_type = mineru_item.get('type', '')

            # 🎯 Dispatch by type (reuses the generic MinerU handlers)
            if item_type == 'table':
                merged_item, paddle_pointer = self._process_table(
                    mineru_item, paddle_text_boxes, paddle_pointer
                )
                merged_data.append(merged_item)
            elif item_type in ['text', 'title', 'header', 'footer', 'equation']:
                merged_item, paddle_pointer, last_matched_index = self._process_text(
                    mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            elif item_type == 'list':
                merged_item, paddle_pointer, last_matched_index = self._process_list(
                    mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index
                )
                merged_data.append(merged_item)
            else:
                # Other types (image, etc.) are appended as-is
                merged_data.append(mineru_item)

        return merged_data

    def _get_rotation_angle_from_vl(self, paddleocr_vl_data: Dict) -> float:
        """Read the rotation angle from PaddleOCR_VL data."""
        return BBoxExtractor._get_rotation_angle(paddleocr_vl_data)

    def _get_original_image_size_from_vl(self, paddleocr_vl_data: Dict) -> tuple:
        """Read the original image size from PaddleOCR_VL data."""
        return BBoxExtractor._get_original_image_size(paddleocr_vl_data)

    def _transform_vl_block_bbox(self, item: Dict, angle: float,
                                 orig_image_size: tuple) -> Dict:
        """
        Transform the block_bbox coordinates of a PaddleOCR_VL block.

        Args:
            item: PaddleOCR_VL block data
            angle: rotation angle
            orig_image_size: original image size

        Returns:
            Block data with transformed coordinates.
        """
        transformed_item = item.copy()
        if 'block_bbox' not in item:
            return transformed_item

        block_bbox = item['block_bbox']
        if len(block_bbox) < 4:
            return transformed_item

        # block_bbox format: [x1, y1, x2, y2]
        # Convert to poly format for the rotation
        poly = [
            [block_bbox[0], block_bbox[1]],  # top-left
            [block_bbox[2], block_bbox[1]],  # top-right
            [block_bbox[2], block_bbox[3]],  # bottom-right
            [block_bbox[0], block_bbox[3]]   # bottom-left
        ]

        # 🎯 Use BBoxExtractor's coordinate transform
        transformed_poly = BBoxExtractor._inverse_rotate_coordinates(
            poly, angle, orig_image_size
        )

        # Convert back to bbox format
        xs = [p[0] for p in transformed_poly]
        ys = [p[1] for p in transformed_poly]
        transformed_bbox = [min(xs), min(ys), max(xs), max(ys)]
        transformed_item['block_bbox'] = transformed_bbox

        return transformed_item

    def _convert_paddleocr_vl_to_mineru(self, paddleocr_vl_item: Dict) -> Dict:
        """
        🎯 Convert a PaddleOCR_VL item to MinerU format.
        Based on the 20 categories of PP-DocLayout_plus-L.
        """
        block_label = paddleocr_vl_item.get('block_label', '')

        # 🎯 PP-DocLayout_plus-L label mapping (20 categories in total)
        label_map = {
            # Titles (3)
            'paragraph_title': 'title',
            'doc_title': 'title',
            'figure_table_chart_title': 'title',
            # Text (9)
            'text': 'text',
            'number': 'text',
            'content': 'text',
            'abstract': 'text',
            'footnote': 'text',
            'aside_text': 'text',
            'algorithm': 'text',
            'reference': 'text',
            'reference_content': 'text',
            # Header / footer (2)
            'header': 'header',
            'footer': 'footer',
            # Table (1)
            'table': 'table',
            # Images / charts (3)
            'image': 'image',
            'chart': 'image',
            'seal': 'image',
            # Formulas (2)
            'formula': 'equation',
            'formula_number': 'equation'
        }
        mineru_type = label_map.get(block_label, 'text')

        mineru_item = {
            'type': mineru_type,
            'bbox': paddleocr_vl_item.get('block_bbox', []),
            'page_idx': 0
        }

        content = paddleocr_vl_item.get('block_content', '')
        if mineru_type == 'table':
            mineru_item['table_body'] = content
        else:
            mineru_item['text'] = content

        # Heading level
        if block_label == 'doc_title':
            mineru_item['text_level'] = 1
        elif block_label == 'paragraph_title':
            mineru_item['text_level'] = 2
        elif block_label == 'figure_table_chart_title':
            mineru_item['text_level'] = 3

        return mineru_item

    def _process_table(self, item: Dict, paddle_text_boxes: List[Dict],
                       start_pointer: int) -> Tuple[Dict, int]:
        """
        Process a table item (MinerU format).

        Strategy:
        - Parse the HTML table.
        - Match every cell against the PaddleOCR bboxes.
        - Return the processed table and the new pointer position.
        """
        table_body = item.get('table_body', '')
        if not table_body:
            print("⚠️ Table body is empty, skipping")
            return item, start_pointer

        try:
            # 🔑 Pass table_bbox so the boxes can be pre-filtered
            table_bbox = item.get('bbox')  # table boundary provided by MinerU
            enhanced_html, cells, new_pointer = self._enhance_table_html_with_bbox(
                table_body,
                paddle_text_boxes,
                start_pointer,
                table_bbox  # ✅ pass the boundary box
            )

            # Update the item
            item['table_body'] = enhanced_html
            item['table_cells'] = cells

            # Statistics
            matched_count = len(cells)
            total_cells = len(BeautifulSoup(table_body, 'html.parser').find_all(['td', 'th']))
            print(f"   Table cells: {matched_count}/{total_cells} matched")

            return item, new_pointer
        except Exception as e:
            print(f"⚠️ Table processing failed: {e}")
            import traceback
            traceback.print_exc()
            return item, start_pointer

    def _process_text(self, item: Dict, paddle_text_boxes: List[Dict],
                      paddle_pointer: int, last_matched_index: int) -> Tuple[Dict, int, int]:
        """Process a text item (the MinerU bbox is kept; only the pointer advances)."""
        merged_item = item.copy()
        text = item.get('text', '')

        matched_bbox, paddle_pointer, last_matched_index = \
            self.text_matcher.find_matching_bbox(
                text, paddle_text_boxes, paddle_pointer, last_matched_index,
                self.look_ahead_window
            )
        if matched_bbox:
            matched_bbox['used'] = True

        return merged_item, paddle_pointer, last_matched_index

    def _process_list(self, item: Dict, paddle_text_boxes: List[Dict],
                      paddle_pointer: int, last_matched_index: int) -> Tuple[Dict, int, int]:
        """Process a list item."""
        merged_item = item.copy()
        list_items = item.get('list_items', [])

        for list_item in list_items:
            matched_bbox, paddle_pointer, last_matched_index = \
                self.text_matcher.find_matching_bbox(
                    list_item, paddle_text_boxes, paddle_pointer, last_matched_index,
                    self.look_ahead_window
                )
            if matched_bbox:
                matched_bbox['used'] = True

        return merged_item, paddle_pointer, last_matched_index

    def _enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                      start_pointer: int,
                                      table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
        """
        Attach bbox information to an HTML table (optimized: filter the table region first).

        Strategy:
        1. Use table_bbox to filter paddle_text_boxes down to the table region.
        2. Group the filtered boxes into rows.
        3. Match HTML rows to paddle row groups.
        4. Look up cells within the matched groups.

        Args:
            html: HTML table
            paddle_text_boxes: all paddle OCR results
            start_pointer: start position
            table_bbox: table boundary box [x1, y1, x2, y2]
        """
        soup = BeautifulSoup(html, 'html.parser')
        cells = []

        # 🔑 Step 1: filter the paddle boxes that fall inside the table region
        table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
            paddle_text_boxes[start_pointer:],
            table_bbox,
            html
        )
        if not table_region_boxes:
            print("⚠️ No paddle boxes found in the table region")
            return str(soup), cells, start_pointer

        print(f"📊 Table region: {len(table_region_boxes)} text boxes")
        print(f"   Bounds: {actual_table_bbox}")

        # 🔑 Step 2: group the table-region boxes into rows
        grouped_boxes = self._group_paddle_boxes_by_rows(
            table_region_boxes,
            y_tolerance=20
        )

        # 🔑 Step 3: sort each group by x coordinate
        for group in grouped_boxes:
            group['boxes'].sort(key=lambda x: x['bbox'][0])
        grouped_boxes.sort(key=lambda g: g['y_center'])
        print(f"   Groups: {len(grouped_boxes)} rows")

        # 🔑 Step 4: match HTML rows to paddle row groups
        html_rows = soup.find_all('tr')
        row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
        print(f"   HTML rows: {len(html_rows)}")
        print(f"   Mapping: {len([v for v in row_mapping.values() if v])} valid mappings")

        # 🔑 Step 5: walk the HTML table and look up cells through the mapping
        for row_idx, row in enumerate(html_rows):
            group_indices = row_mapping.get(row_idx, [])
            if not group_indices:
                continue

            # Merge the boxes of all mapped groups
            current_boxes = []
            for group_idx in group_indices:
                if group_idx < len(grouped_boxes):
                    current_boxes.extend(grouped_boxes[group_idx]['boxes'])
            current_boxes.sort(key=lambda x: x['bbox'][0])

            # 🎯 Key improvement: extract the HTML cells and pre-compute column boundaries
            html_cells = row.find_all(['td', 'th'])
            if not html_cells:
                continue

            # 🔑 Estimate column boundaries from the x-coordinate distribution
            col_boundaries = self._estimate_column_boundaries(
                current_boxes,
                len(html_cells)
            )
            print(f"   Row {row_idx + 1}: {len(html_cells)} columns, boundaries: {col_boundaries}")

            # 🔑 Assign boxes per column
            for col_idx, cell in enumerate(html_cells):
                cell_text = cell.get_text(strip=True)
                if not cell_text:
                    continue

                # 🎯 Collect all boxes that fall inside this column
                col_boxes = self._get_boxes_in_column(
                    current_boxes,
                    col_boundaries,
                    col_idx
                )
                if not col_boxes:
                    continue

                # 🎯 Try to match and merge
                matched_result = self._match_and_merge_boxes_for_cell(
                    cell_text,
                    col_boxes
                )
                if matched_result:
                    merged_bbox = matched_result['bbox']
                    merged_text = matched_result['text']
                    cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
                    cell['data-score'] = f"{matched_result['score']:.4f}"
                    cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'matched_text': merged_text,
                        'bbox': merged_bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_result['score'],
                        'paddle_bbox_indices': matched_result['paddle_indices']
                    })
                    # Mark the boxes as used
                    for box in matched_result['used_boxes']:
                        box['used'] = True

        # Compute the new pointer position
        used_count = sum(1 for box in table_region_boxes if box.get('used'))
        new_pointer = start_pointer + used_count
        print(f"   Matched: {len(cells)} cells")

        return str(soup), cells, new_pointer

    def _estimate_column_boundaries(self, boxes: List[Dict],
                                    num_cols: int) -> List[Tuple[int, int]]:
        """
        Estimate column boundaries (improved: handles multiple text boxes per column).

        Args:
            boxes: all boxes of the current row (already sorted by x)
            num_cols: number of columns in the HTML table

        Returns:
            List of column boundaries [(x_start, x_end), ...]
        """
        if not boxes:
            return []

        # 🔑 Key improvement: cluster by x coordinate first (merges multiple boxes in the same column)
        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=10)
        print(f"   X clustering: {len(boxes)} boxes -> {len(x_clusters)} column clusters")

        # Overall x range of all clusters
        x_min = min(cluster['x_min'] for cluster in x_clusters)
        x_max = max(cluster['x_max'] for cluster in x_clusters)

        # 🎯 Strategy 1: cluster count matches the column count
        if len(x_clusters) == num_cols:
            # Use the cluster bounds directly
            boundaries = [(cluster['x_min'], cluster['x_max'])
                          for cluster in x_clusters]
            return boundaries

        # # 🎯 Strategy 2: fewer clusters than columns (some columns have no text)
        # if len(x_clusters) < num_cols:
        #     print(f"   ⚠️ {len(x_clusters)} clusters < {num_cols} columns, using even spacing")
        #     # Infer the missing column boundaries from the gaps between clusters
        #     cluster_centers = [(c['x_min'] + c['x_max']) / 2 for c in x_clusters]
        #     # Average column width
        #     if len(cluster_centers) > 1:
        #         avg_gap = (x_max - x_min) / (num_cols - 1)
        #     else:
        #         avg_gap = 100  # default column width
        #     # Generate the boundaries
        #     boundaries = []
        #     prev_x = x_min
        #     for i in range(num_cols):
        #         if i < len(x_clusters):
        #             # Use the actual cluster
        #             boundaries.append((x_clusters[i]['x_min'], x_clusters[i]['x_max']))
        #             prev_x = x_clusters[i]['x_max']
        #         else:
        #             # Infer the missing column
        #             next_x = prev_x + avg_gap
        #             boundaries.append((prev_x, next_x))
        #             prev_x = next_x
        #     return boundaries

        # 🎯 Strategy 3: more clusters than columns (some columns contain several text clusters)
        if len(x_clusters) > num_cols:
            print(f"   ℹ️ {len(x_clusters)} clusters > {num_cols} columns, merging close clusters")
            # Merge the closest clusters
            merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
            boundaries = [(cluster['x_min'], cluster['x_max'])
                          for cluster in merged_clusters]
            return boundaries

        return []

    def _cluster_boxes_by_x(self, boxes: List[Dict],
                            x_tolerance: int = 50) -> List[Dict]:
        """
        Cluster boxes by x coordinate (merges multiple text boxes in the same column).

        Args:
            boxes: list of text boxes
            x_tolerance: tolerance on the x coordinate

        Returns:
            List of clusters [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
        """
        if not boxes:
            return []

        # Sort by the left x coordinate
        sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
        clusters = []
        current_cluster = None

        for box in sorted_boxes:
            bbox = box['bbox']
            x_start = bbox[0]
            x_end = bbox[2]

            if current_cluster is None:
                # Start a new cluster
                current_cluster = {
                    'x_min': x_start,
                    'x_max': x_end,
                    'boxes': [box]
                }
            else:
                # Does the box belong to the current cluster?
                # 🔑 Condition: the x ranges overlap or are close
                overlap = not (x_start > current_cluster['x_max'] + x_tolerance or
                               x_end < current_cluster['x_min'] - x_tolerance)
                if overlap or abs(x_start - current_cluster['x_min']) <= x_tolerance:
                    # Merge into the current cluster
                    current_cluster['boxes'].append(box)
                    current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
                    current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
                else:
                    # Save the current cluster and start a new one
                    clusters.append(current_cluster)
                    current_cluster = {
                        'x_min': x_start,
                        'x_max': x_end,
                        'boxes': [box]
                    }

        # Append the last cluster
        if current_cluster:
            clusters.append(current_cluster)

        return clusters

    def _merge_close_clusters(self, clusters: List[Dict],
                              target_count: int) -> List[Dict]:
        """
        Merge the closest clusters until the count equals the target column count.

        Args:
            clusters: list of clusters
            target_count: target column count

        Returns:
            Merged list of clusters.
        """
        if len(clusters) <= target_count:
            return clusters

        # Work on a copy so the input is not modified
        working_clusters = [c.copy() for c in clusters]

        while len(working_clusters) > target_count:
            # Find the two closest clusters
            min_distance = float('inf')
            merge_idx = 0
            for i in range(len(working_clusters) - 1):
                distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
                if distance < min_distance:
                    min_distance = distance
                    merge_idx = i

            # Merge them
            cluster1 = working_clusters[merge_idx]
            cluster2 = working_clusters[merge_idx + 1]
            merged_cluster = {
                'x_min': cluster1['x_min'],
                'x_max': cluster2['x_max'],
                'boxes': cluster1['boxes'] + cluster2['boxes']
            }

            # Replace the pair with the merged cluster
            working_clusters[merge_idx] = merged_cluster
            working_clusters.pop(merge_idx + 1)

        return working_clusters

    def _get_boxes_in_column(self, boxes: List[Dict],
                             boundaries: List[Tuple[int, int]],
                             col_idx: int) -> List[Dict]:
        """
        Get the boxes within a given column range (improved: includes overlaps).

        Args:
            boxes: all boxes of the current row
            boundaries: column boundaries
            col_idx: column index

        Returns:
            The boxes belonging to that column.
        """
        if col_idx >= len(boundaries):
            return []

        x_start, x_end = boundaries[col_idx]
        col_boxes = []
        for box in boxes:
            bbox = box['bbox']
            box_x_start = bbox[0]
            box_x_end = bbox[2]
            # 🔑 Improvement: check for overlap (not just the centre point)
            overlap = not (box_x_start > x_end or box_x_end < x_start)
            if overlap:
                col_boxes.append(box)

        return col_boxes

    def _match_and_merge_boxes_for_cell(self, cell_text: str,
                                        col_boxes: List[Dict]) -> Optional[Dict]:
        """
        Match and merge multiple boxes for a single cell.

        Strategy:
        1. Try an exact match against a single box.
        2. If that fails, try merging several boxes.

        Args:
            cell_text: HTML cell text
            col_boxes: candidate boxes for this column

        Returns:
            {'bbox': [x1, y1, x2, y2], 'text': str, 'score': float,
             'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2]}
        """
        from fuzzywuzzy import fuzz

        cell_text_normalized = self.text_matcher.normalize_text(cell_text)
        if len(cell_text_normalized) < 2:
            return None

        # 🔑 Strategy 1: exact match against a single box
        for box in col_boxes:
            if box.get('used'):
                continue
            box_text = self.text_matcher.normalize_text(box['text'])
            if cell_text_normalized == box_text:
                return {
                    'bbox': box['bbox'],
                    'text': box['text'],
                    'score': box['score'],
                    'paddle_indices': [box['paddle_bbox_index']],
                    'used_boxes': [box]
                }

        # 🔑 Strategy 2: merge several boxes and match
        unused_boxes = [b for b in col_boxes if not b.get('used')]
        if not unused_boxes:
            return None

        # Try combinations of increasing length
        for combo_len in range(1, min(len(unused_boxes) + 1, 5)):
            # Sort by y coordinate (top to bottom)
            sorted_boxes = sorted(unused_boxes, key=lambda b: b['bbox'][1])
            # Sliding window
            for start_idx in range(len(sorted_boxes) - combo_len + 1):
                combo_boxes = sorted_boxes[start_idx:start_idx + combo_len]
                # Merge the texts
                merged_text = ''.join([b['text'] for b in combo_boxes])
                merged_text_normalized = self.text_matcher.normalize_text(merged_text)
                # Similarity
                similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
                if similarity >= 85:  # high threshold
                    # Merge the bboxes
                    merged_bbox = [
                        min(b['bbox'][0] for b in combo_boxes),
                        min(b['bbox'][1] for b in combo_boxes),
                        max(b['bbox'][2] for b in combo_boxes),
                        max(b['bbox'][3] for b in combo_boxes)
                    ]
                    return {
                        'bbox': merged_bbox,
                        'text': merged_text,
                        'score': sum(b['score'] for b in combo_boxes) / len(combo_boxes),
                        'paddle_indices': [b['paddle_bbox_index'] for b in combo_boxes],
                        'used_boxes': combo_boxes
                    }

        # 🔑 Strategy 3: fall back to the single best box
        best_box = None
        best_score = 0
        for box in unused_boxes:
            box_text = self.text_matcher.normalize_text(box['text'])
            score = fuzz.partial_ratio(cell_text_normalized, box_text)
            if score > best_score:
                best_score = score
                best_box = box
        if best_box and best_score >= 70:
            return {
                'bbox': best_box['bbox'],
                'text': best_box['text'],
                'score': best_box['score'],
                'paddle_indices': [best_box['paddle_bbox_index']],
                'used_boxes': [best_box]
            }

        return None

    def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
                                      table_bbox: Optional[List[int]],
                                      html: str) -> Tuple[List[Dict], List[int]]:
        """
        Filter the paddle boxes that belong to the table region.

        Strategy:
        1. If table_bbox is available, filter by the (expanded) boundary box.
        2. Otherwise, infer the region by matching the HTML content.

        Args:
            paddle_boxes: paddle OCR results
            table_bbox: table boundary box [x1, y1, x2, y2]
            html: HTML content (used for content-based validation)

        Returns:
            (filtered boxes, actual table boundary box)
        """
        if not paddle_boxes:
            return [], [0, 0, 0, 0]

        # 🎯 Strategy 1: use the provided table_bbox (with an expanded margin)
        if table_bbox and len(table_bbox) == 4:
            x1, y1, x2, y2 = table_bbox
            # Expand the boundary (text may sit slightly outside the border)
            margin = 20
            expanded_bbox = [
                max(0, x1 - margin),
                max(0, y1 - margin),
                x2 + margin,
                y2 + margin
            ]
            filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                # Keep boxes whose centre lies inside the expanded region
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                        expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    filtered.append(box)
            if filtered:
                # Actual boundary box of the filtered boxes
                actual_bbox = [
                    min(b['bbox'][0] for b in filtered),
                    min(b['bbox'][1] for b in filtered),
                    max(b['bbox'][2] for b in filtered),
                    max(b['bbox'][3] for b in filtered)
                ]
                return filtered, actual_bbox

        # 🎯 Strategy 2: infer the region by matching content
        print("   ℹ️ No table_bbox, inferring the table region from content...")
        # Extract all cell texts from the HTML
        soup = BeautifulSoup(html, 'html.parser')
        html_texts = set()
        for cell in soup.find_all(['td', 'th']):
            text = cell.get_text(strip=True)
            if text:
                html_texts.add(self.text_matcher.normalize_text(text))
        if not html_texts:
            return [], [0, 0, 0, 0]

        # Find the boxes whose text matches the HTML content
        matched_boxes = []
        for box in paddle_boxes:
            normalized_text = self.text_matcher.normalize_text(box['text'])
            # Substring match in either direction
            if any(normalized_text in ht or ht in normalized_text
                   for ht in html_texts):
                matched_boxes.append(box)

        if not matched_boxes:
            # 🔑 Fallback: if exact matching failed, try fuzzy matching
            print("   ℹ️ Exact matching failed, trying fuzzy matching...")
            from fuzzywuzzy import fuzz
            for box in paddle_boxes:
                normalized_text = self.text_matcher.normalize_text(box['text'])
                for ht in html_texts:
                    similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # lowered threshold
                        matched_boxes.append(box)
                        break

        if matched_boxes:
            # Boundary box of the matched boxes
            actual_bbox = [
                min(b['bbox'][0] for b in matched_boxes),
                min(b['bbox'][1] for b in matched_boxes),
                max(b['bbox'][2] for b in matched_boxes),
                max(b['bbox'][3] for b in matched_boxes)
            ]
            # 🔑 Expand the boundary to include text that may have been missed
            margin = 30
            expanded_bbox = [
                max(0, actual_bbox[0] - margin),
                max(0, actual_bbox[1] - margin),
                actual_bbox[2] + margin,
                actual_bbox[3] + margin
            ]
            # Re-filter (include text on the boundary)
            final_filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                        expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    final_filtered.append(box)
            return final_filtered, actual_bbox

        # 🔑 Last resort: return all boxes
        print("   ⚠️ Could not determine the table region, using all paddle boxes")
        if paddle_boxes:
            actual_bbox = [
                min(b['bbox'][0] for b in paddle_boxes),
                min(b['bbox'][1] for b in paddle_boxes),
                max(b['bbox'][2] for b in paddle_boxes),
                max(b['bbox'][3] for b in paddle_boxes)
            ]
            return paddle_boxes, actual_bbox

        return [], [0, 0, 0, 0]

    def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict],
                                    y_tolerance: int = 20) -> List[Dict]:
        """
        Group paddle_text_boxes by y coordinate (clustering).

        Args:
            paddle_boxes: list of Paddle OCR text boxes
            y_tolerance: tolerance on the y coordinate (pixels)

        Returns:
            List of groups, each {'y_center': float, 'boxes': List[Dict]}.
        """
        if not paddle_boxes:
            return []

        # Centre y coordinate of each box
        boxes_with_y = []
        for box in paddle_boxes:
            bbox = box['bbox']
            y_center = (bbox[1] + bbox[3]) / 2
            boxes_with_y.append({
                'y_center': y_center,
                'box': box
            })

        # Sort by y coordinate
        boxes_with_y.sort(key=lambda x: x['y_center'])

        # Cluster
        groups = []
        current_group = None
        for item in boxes_with_y:
            if current_group is None:
                # Start a new group
                current_group = {
                    'y_center': item['y_center'],
                    'boxes': [item['box']]
                }
            else:
                # Does the box belong to the current group?
                if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
                    current_group['boxes'].append(item['box'])
                    # Update the group centre (average)
                    current_group['y_center'] = sum(
                        b['bbox'][1] + b['bbox'][3] for b in current_group['boxes']
                    ) / (2 * len(current_group['boxes']))
                else:
                    # Save the current group and start a new one
                    groups.append(current_group)
                    current_group = {
                        'y_center': item['y_center'],
                        'boxes': [item['box']]
                    }

        # Append the last group
        if current_group:
            groups.append(current_group)

        return groups

    def _find_best_match_in_group(self, target_text: str, boxes: List[Dict],
                                  start_idx: int = 0) -> Optional[Dict]:
        """
        Find the best match within a list of boxes (already sorted by x coordinate).

        Args:
            target_text: target text
            boxes: candidate boxes (sorted)
            start_idx: start index

        Returns:
            The best-matching box, or None.
        """
        target_text = self.text_matcher.normalize_text(target_text)
        if len(target_text) < 2:
            return None

        best_match = None
        best_score = 0

        # Search from start_idx first, then wrap around
        search_range = list(range(start_idx, len(boxes))) + list(range(0, start_idx))
        for idx in search_range:
            box = boxes[idx]
            if box.get('used'):
                continue

            box_text = self.text_matcher.normalize_text(box['text'])

            # Exact match
            if target_text == box_text:
                return box

            # Length-ratio check
            length_ratio = min(len(target_text), len(box_text)) / max(len(target_text), len(box_text))
            if length_ratio < 0.35:
                continue

            # Substring check
            shorter = target_text if len(target_text) < len(box_text) else box_text
            longer = box_text if len(target_text) < len(box_text) else target_text
            is_substring = shorter in longer

            # Similarity
            from fuzzywuzzy import fuzz
            partial_ratio = fuzz.partial_ratio(target_text, box_text)
            if is_substring:
                partial_ratio += 10
            if partial_ratio >= self.text_matcher.similarity_threshold:
                if partial_ratio > best_score:
                    best_score = partial_ratio
                    best_match = box

        return best_match

    def _match_html_rows_to_paddle_groups(self, html_rows: List,
                                          grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
        """
        Match HTML rows to paddle groups (improved: handles text that spans rows).

        Strategy:
        1. First pass: exact matching based on content.
        2. Second pass: merge unused groups into the nearest matched row.
        """
        if not html_rows or not grouped_boxes:
            return {}

        mapping = {}

        # 🎯 Strategy 1: equal counts, simple 1:1 mapping
        if len(html_rows) == len(grouped_boxes):
            for i in range(len(html_rows)):
                mapping[i] = [i]
            return mapping

        # 🎯 Strategy 2: first pass, content-based matching
        used_groups = set()
        for row_idx, row in enumerate(html_rows):
            row_texts = [cell.get_text(strip=True) for cell in row.find_all(['td', 'th'])]
            row_texts = [t for t in row_texts if t]
            if not row_texts:
                mapping[row_idx] = []
                continue

            row_text_normalized = [self.text_matcher.normalize_text(t) for t in row_texts]

            # Find the best-matching paddle group
            best_groups = []
            best_score = 0

            # Try single groups first
            for group_idx, group in enumerate(grouped_boxes):
                if group_idx in used_groups:
                    continue
                group_texts = [self.text_matcher.normalize_text(b['text'])
                               for b in group['boxes'] if not b.get('used')]
                match_count = sum(1 for rt in row_text_normalized
                                  if any(rt in gt or gt in rt for gt in group_texts))
                coverage = match_count / len(row_texts) if row_texts else 0
                if coverage > best_score:
                    best_score = coverage
                    best_groups = [group_idx]

            # 🔑 If a single group is not good enough, try several consecutive groups
            if best_score < 0.5:
                # Search forward from the first unused group
                start_group = min([g for g in range(len(grouped_boxes)) if g not in used_groups],
                                  default=0)
                combined_texts = []
                combined_groups = []
                for group_idx in range(start_group, min(start_group + 5, len(grouped_boxes))):
                    if group_idx in used_groups:
                        continue
                    combined_groups.append(group_idx)
                    combined_texts.extend([
                        self.text_matcher.normalize_text(b['text'])
                        for b in grouped_boxes[group_idx]['boxes']
                        if not b.get('used')
                    ])
                    match_count = sum(1 for rt in row_text_normalized
                                      if any(rt in gt or gt in rt for gt in combined_texts))
                    coverage = match_count / len(row_texts) if row_texts else 0
                    if coverage > best_score:
                        best_score = coverage
                        best_groups = combined_groups.copy()

            # Record the mapping
            if best_groups and best_score > 0.3:
                mapping[row_idx] = best_groups
                used_groups.update(best_groups)
            else:
                # Fallback: guess by position
                estimated_group = min(row_idx, len(grouped_boxes) - 1)
                if estimated_group not in used_groups:
                    mapping[row_idx] = [estimated_group]
                    used_groups.add(estimated_group)
                else:
                    mapping[row_idx] = []

        # 🎯 Strategy 3: second pass, handle the unused groups (important!)
        unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
        if unused_groups:
            print(f"   ℹ️ Found {len(unused_groups)} unmatched paddle groups: {unused_groups}")
            # 🔑 Merge each unused group into an adjacent matched row
            for unused_idx in unused_groups:
                # Strategy: merge into the nearest matched row above or below
                # 1. y coordinate of the unused group
                unused_y = grouped_boxes[unused_idx]['y_center']
                # 2. Find the closest used group
                closest_used_idx = None
                min_distance = float('inf')
                for used_idx in sorted(used_groups):
                    distance = abs(grouped_boxes[used_idx]['y_center'] - unused_y)
                    if distance < min_distance:
                        min_distance = distance
                        closest_used_idx = used_idx
                if closest_used_idx is not None:
                    # 3. Find the HTML row that group was mapped to
                    target_html_row = None
                    for html_row_idx, group_indices in mapping.items():
                        if closest_used_idx in group_indices:
                            target_html_row = html_row_idx
                            break
                    if target_html_row is not None:
                        # 4. Decide the merge direction (based on the y coordinate)
                        if unused_y < grouped_boxes[closest_used_idx]['y_center']:
                            # The unused group is above: likely spilled text from the previous row
                            if target_html_row > 0:
                                # Merge into the previous row
                                if target_html_row - 1 in mapping:
                                    if unused_idx not in mapping[target_html_row - 1]:
                                        mapping[target_html_row - 1].append(unused_idx)
                                        print(f"   • Group {unused_idx} merged into HTML row {target_html_row - 1} (above)")
                            else:
                                # Merge into the current row
                                if unused_idx not in mapping[target_html_row]:
                                    mapping[target_html_row].append(unused_idx)
                                    print(f"   • Group {unused_idx} merged into HTML row {target_html_row} (current)")
                        else:
                            # The unused group is below: likely spanning text of the current row
                            if unused_idx not in mapping[target_html_row]:
                                mapping[target_html_row].append(unused_idx)
                                print(f"   • Group {unused_idx} merged into HTML row {target_html_row} (below)")
                        used_groups.add(unused_idx)

        # 🔑 Strategy 4: third pass, sort each row's group indices by y coordinate
        for row_idx in mapping:
            if mapping[row_idx]:
                mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])

        return mapping
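

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only; not part of the processing pipeline).
# Assumptions, not taken from the real pipeline: the geometry helpers exercised
# below only read box['bbox'], so a DataProcessor built with text_matcher=None
# is enough for this demo, and the sample boxes/items are synthetic.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_processor = DataProcessor(text_matcher=None)  # matcher is unused by the helpers below

    # Synthetic PaddleOCR-style boxes: a 2x2 grid of text boxes.
    sample_boxes = [
        {'bbox': [10, 10, 80, 30], 'text': 'Name', 'score': 0.99, 'paddle_bbox_index': 0},
        {'bbox': [120, 12, 190, 32], 'text': 'Amount', 'score': 0.98, 'paddle_bbox_index': 1},
        {'bbox': [10, 50, 80, 70], 'text': 'Widget', 'score': 0.97, 'paddle_bbox_index': 2},
        {'bbox': [120, 52, 190, 72], 'text': '42', 'score': 0.96, 'paddle_bbox_index': 3},
    ]

    # Row grouping by y centre, then column boundaries for the first row.
    rows = demo_processor._group_paddle_boxes_by_rows(sample_boxes, y_tolerance=20)
    print(f"row groups: {len(rows)}")  # expected: 2
    boundaries = demo_processor._estimate_column_boundaries(rows[0]['boxes'], num_cols=2)
    print(f"column boundaries of first row: {boundaries}")  # expected: [(10, 80), (120, 190)]

    # Format conversion from a DotsOCR-style item to the MinerU schema.
    sample_dots_item = {
        'category': 'Table',
        'bbox': [5, 5, 200, 80],
        'text': '<table><tr><td>Name</td><td>Amount</td></tr></table>',
    }
    print(demo_processor._convert_dotsocr_to_mineru(sample_dots_item))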