""" 数据处理模块 负责处理 MinerU/PaddleOCR_VL/DotsOCR 数据,添加 bbox 信息 """ from typing import List, Dict, Tuple, Optional from bs4 import BeautifulSoup try: from .text_matcher import TextMatcher from .bbox_extractor import BBoxExtractor except ImportError: from text_matcher import TextMatcher from bbox_extractor import BBoxExtractor class DataProcessor: """数据处理器""" def __init__(self, text_matcher: TextMatcher, look_ahead_window: int = 10): """ Args: text_matcher: 文本匹配器 look_ahead_window: 向前查找窗口 """ self.text_matcher = text_matcher self.look_ahead_window = look_ahead_window def process_mineru_data(self, mineru_data: List[Dict], paddle_text_boxes: List[Dict]) -> List[Dict]: """ 处理 MinerU 数据,添加 bbox 信息 Args: mineru_data: MinerU 数据 paddle_text_boxes: PaddleOCR 文字框列表 Returns: 合并后的数据, table cell使用paddle的bbox,其他类型只是移动指针,bbox还是沿用minerU的bbox """ merged_data = [] paddle_pointer = 0 last_matched_index = 0 # 按 bbox 排序 mineru_data.sort( key=lambda x: (x['bbox'][1], x['bbox'][0]) if 'bbox' in x else (float('inf'), float('inf')) ) for item in mineru_data: item_type = item.get('type', '') if item_type == 'table': merged_item, paddle_pointer = self._process_table( item, paddle_text_boxes, paddle_pointer ) merged_data.append(merged_item) elif item_type in ['text', 'title']: merged_item, paddle_pointer, last_matched_index = self._process_text( item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) elif item_type == 'list': merged_item, paddle_pointer, last_matched_index = self._process_list( item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) else: merged_data.append(item.copy()) return merged_data def process_dotsocr_data(self, dotsocr_data: List[Dict], paddle_text_boxes: List[Dict]) -> List[Dict]: """ 🎯 处理 DotsOCR 数据,转换为 MinerU 格式并添加 bbox 信息 Args: dotsocr_data: DotsOCR 数据 paddle_text_boxes: PaddleOCR 文字框列表 Returns: MinerU 格式的合并数据 """ merged_data = [] paddle_pointer = 0 last_matched_index = 0 # 按 bbox 排序 dotsocr_data.sort( key=lambda x: (x['bbox'][1], x['bbox'][0]) if 'bbox' in x else (float('inf'), float('inf')) ) for item in dotsocr_data: # 🎯 转换为 MinerU 格式 mineru_item = self._convert_dotsocr_to_mineru(item) category = mineru_item.get('type', '') # 🎯 根据类型处理 if category.lower() == 'table': merged_item, paddle_pointer = self._process_table( mineru_item, paddle_text_boxes, paddle_pointer ) merged_data.append(merged_item) elif category.lower() in ['text', 'title', 'header', 'footer']: merged_item, paddle_pointer, last_matched_index = self._process_text( mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) elif category.lower() == 'list': merged_item, paddle_pointer, last_matched_index = self._process_list( mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) else: # Page-header, Page-footer, Picture 等 merged_data.append(mineru_item) return merged_data def _convert_dotsocr_to_mineru(self, dotsocr_item: Dict) -> Dict: """ 🎯 将 DotsOCR 格式转换为 MinerU 格式 DotsOCR: { "category": "Table", "bbox": [x1, y1, x2, y2], "text": "..." } MinerU: { "type": "table", "bbox": [x1, y1, x2, y2], "table_body": "...", "page_idx": 0 } """ category = dotsocr_item.get('category', '') # 🎯 Category 映射 category_map = { 'Page-header': 'header', 'Page-footer': 'footer', 'Picture': 'image', 'Figure': 'image', 'Section-header': 'title', 'Table': 'table', 'Text': 'text', 'Title': 'title', 'List': 'list', 'Caption': 'title' } mineru_type = category_map.get(category, 'text') # 🎯 基础转换 mineru_item = { 'type': mineru_type, 'bbox': dotsocr_item.get('bbox', []), 'page_idx': 0 # DotsOCR 默认单页 } # 🎯 处理文本内容 text = dotsocr_item.get('text', '') if mineru_type == 'table': # 表格:text -> table_body mineru_item['table_body'] = text else: # 其他类型:保持 text mineru_item['text'] = text # 标题级别 if category == 'Section-header': mineru_item['text_level'] = 1 return mineru_item def process_paddleocr_vl_data(self, paddleocr_vl_data: Dict, paddle_text_boxes: List[Dict]) -> List[Dict]: """ 处理 PaddleOCR_VL 数据,添加 bbox 信息 Args: paddleocr_vl_data: PaddleOCR_VL 数据 (JSON 对象) paddle_text_boxes: PaddleOCR 文字框列表 Returns: 🎯 MinerU 格式的合并数据(统一输出格式) """ merged_data = [] paddle_pointer = 0 last_matched_index = 0 # 🎯 获取旋转角度和原始图像尺寸 rotation_angle = self._get_rotation_angle_from_vl(paddleocr_vl_data) orig_image_size = None if rotation_angle != 0: orig_image_size = self._get_original_image_size_from_vl(paddleocr_vl_data) print(f"🔄 PaddleOCR_VL 检测到旋转角度: {rotation_angle}°") print(f"📐 原始图像尺寸: {orig_image_size[0]} x {orig_image_size[1]}") # 提取 parsing_res_list parsing_res_list = paddleocr_vl_data.get('parsing_res_list', []) # 按 bbox 排序 parsing_res_list.sort( key=lambda x: (x['block_bbox'][1], x['block_bbox'][0]) if 'block_bbox' in x else (float('inf'), float('inf')) ) for item in parsing_res_list: # 🎯 先转换 bbox 坐标(如果需要) if rotation_angle != 0 and orig_image_size: item = self._transform_vl_block_bbox(item, rotation_angle, orig_image_size) # 🎯 统一转换为 MinerU 格式 mineru_item = self._convert_paddleocr_vl_to_mineru(item) item_type = mineru_item.get('type', '') # 🎯 根据类型处理(复用 MinerU 的通用方法) if item_type == 'table': merged_item, paddle_pointer = self._process_table( mineru_item, paddle_text_boxes, paddle_pointer ) merged_data.append(merged_item) elif item_type in ['text', 'title', 'header', 'footer', 'equation']: merged_item, paddle_pointer, last_matched_index = self._process_text( mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) elif item_type == 'list': merged_item, paddle_pointer, last_matched_index = self._process_list( mineru_item, paddle_text_boxes, paddle_pointer, last_matched_index ) merged_data.append(merged_item) else: # 其他类型(image 等)直接添加 merged_data.append(mineru_item) return merged_data def _get_rotation_angle_from_vl(self, paddleocr_vl_data: Dict) -> float: """从 PaddleOCR_VL 数据中获取旋转角度""" return BBoxExtractor._get_rotation_angle(paddleocr_vl_data) def _get_original_image_size_from_vl(self, paddleocr_vl_data: Dict) -> tuple: """从 PaddleOCR_VL 数据中获取原始图像尺寸""" return BBoxExtractor._get_original_image_size(paddleocr_vl_data) def _transform_vl_block_bbox(self, item: Dict, angle: float, orig_image_size: tuple) -> Dict: """ 转换 PaddleOCR_VL 的 block_bbox 坐标 Args: item: PaddleOCR_VL 的 block 数据 angle: 旋转角度 orig_image_size: 原始图像尺寸 Returns: 转换后的 block 数据 """ transformed_item = item.copy() if 'block_bbox' not in item: return transformed_item block_bbox = item['block_bbox'] if len(block_bbox) < 4: return transformed_item # block_bbox 格式: [x1, y1, x2, y2] # 转换为 poly 格式进行旋转 poly = [ [block_bbox[0], block_bbox[1]], # 左上 [block_bbox[2], block_bbox[1]], # 右上 [block_bbox[2], block_bbox[3]], # 右下 [block_bbox[0], block_bbox[3]] # 左下 ] # 🎯 使用 BBoxExtractor 的坐标转换方法 transformed_poly = BBoxExtractor._inverse_rotate_coordinates( poly, angle, orig_image_size ) # 转换回 bbox 格式 xs = [p[0] for p in transformed_poly] ys = [p[1] for p in transformed_poly] transformed_bbox = [min(xs), min(ys), max(xs), max(ys)] transformed_item['block_bbox'] = transformed_bbox return transformed_item def _convert_paddleocr_vl_to_mineru(self, paddleocr_vl_item: Dict) -> Dict: """ 🎯 将 PaddleOCR_VL 格式转换为 MinerU 格式 基于 PP-DocLayout_plus-L 的 20 种类别 """ block_label = paddleocr_vl_item.get('block_label', '') # 🎯 PP-DocLayout_plus-L 类别映射(共 20 种) label_map = { # 标题类(3种) 'paragraph_title': 'title', 'doc_title': 'title', 'figure_table_chart_title': 'title', # 文本类(9种) 'text': 'text', 'number': 'text', 'content': 'text', 'abstract': 'text', 'footnote': 'text', 'aside_text': 'text', 'algorithm': 'text', 'reference': 'text', 'reference_content': 'text', # 页眉页脚(2种) 'header': 'header', 'footer': 'footer', # 表格(1种) 'table': 'table', # 图片/图表(3种) 'image': 'image', 'chart': 'image', 'seal': 'image', # 公式(2种) 'formula': 'equation', 'formula_number': 'equation' } mineru_type = label_map.get(block_label, 'text') mineru_item = { 'type': mineru_type, 'bbox': paddleocr_vl_item.get('block_bbox', []), 'page_idx': 0 } content = paddleocr_vl_item.get('block_content', '') if mineru_type == 'table': mineru_item['table_body'] = content else: mineru_item['text'] = content # 标题级别 if block_label == 'doc_title': mineru_item['text_level'] = 1 elif block_label == 'paragraph_title': mineru_item['text_level'] = 2 elif block_label == 'figure_table_chart_title': mineru_item['text_level'] = 3 return mineru_item def _process_table(self, item: Dict, paddle_text_boxes: List[Dict], start_pointer: int) -> Tuple[Dict, int]: """ 处理表格类型(MinerU 格式) 策略: - 解析 HTML 表格 - 为每个单元格匹配 PaddleOCR 的 bbox - 返回处理后的表格和新指针位置 """ table_body = item.get('table_body', '') if not table_body: print(f"⚠️ 表格内容为空,跳过") return item, start_pointer try: # 🔑 传入 table_bbox 用于筛选 table_bbox = item.get('bbox') # MinerU 提供的表格边界 enhanced_html, cells, new_pointer = self._enhance_table_html_with_bbox( table_body, paddle_text_boxes, start_pointer, table_bbox # ✅ 传入边界框 ) # 更新 item item['table_body'] = enhanced_html item['table_cells'] = cells # 统计信息 matched_count = len(cells) total_cells = len(BeautifulSoup(table_body, 'html.parser').find_all(['td', 'th'])) print(f" 表格单元格: {matched_count}/{total_cells} 匹配") return item, new_pointer except Exception as e: print(f"⚠️ 表格处理失败: {e}") import traceback traceback.print_exc() return item, start_pointer def _process_text(self, item: Dict, paddle_text_boxes: List[Dict], paddle_pointer: int, last_matched_index: int) -> Tuple[Dict, int, int]: """处理文本""" merged_item = item.copy() text = item.get('text', '') matched_bbox, paddle_pointer, last_matched_index = \ self.text_matcher.find_matching_bbox( text, paddle_text_boxes, paddle_pointer, last_matched_index, self.look_ahead_window ) if matched_bbox: matched_bbox['used'] = True return merged_item, paddle_pointer, last_matched_index def _process_list(self, item: Dict, paddle_text_boxes: List[Dict], paddle_pointer: int, last_matched_index: int) -> Tuple[Dict, int, int]: """处理列表""" merged_item = item.copy() list_items = item.get('list_items', []) for list_item in list_items: matched_bbox, paddle_pointer, last_matched_index = \ self.text_matcher.find_matching_bbox( list_item, paddle_text_boxes, paddle_pointer, last_matched_index, self.look_ahead_window ) if matched_bbox: matched_bbox['used'] = True return merged_item, paddle_pointer, last_matched_index def _enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict], start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]: """ 为 HTML 表格添加 bbox 信息(优化版:先筛选表格区域) 策略: 1. 根据 table_bbox 筛选出表格区域内的 paddle_text_boxes 2. 将筛选后的 boxes 按行分组 3. 智能匹配 HTML 行与 paddle 行组 4. 在匹配的组内查找单元格 Args: html: HTML 表格 paddle_text_boxes: 全部 paddle OCR 结果 start_pointer: 开始位置 table_bbox: 表格边界框 [x1, y1, x2, y2] """ soup = BeautifulSoup(html, 'html.parser') cells = [] # 🔑 第一步:筛选表格区域内的 paddle boxes table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region( paddle_text_boxes[start_pointer:], table_bbox, html ) if not table_region_boxes: print(f"⚠️ 未在表格区域找到 paddle boxes") return str(soup), cells, start_pointer print(f"📊 表格区域: {len(table_region_boxes)} 个文本框") print(f" 边界: {actual_table_bbox}") # 🔑 第二步:将表格区域的 boxes 按行分组 grouped_boxes = self._group_paddle_boxes_by_rows( table_region_boxes, y_tolerance=20 ) # 🔑 第三步:在每组内按 x 坐标排序 for group in grouped_boxes: group['boxes'].sort(key=lambda x: x['bbox'][0]) grouped_boxes.sort(key=lambda g: g['y_center']) print(f" 分组: {len(grouped_boxes)} 行") # 🔑 第四步:智能匹配 HTML 行与 paddle 行组 html_rows = soup.find_all('tr') row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes) print(f" HTML行: {len(html_rows)} 行") print(f" 映射: {len([v for v in row_mapping.values() if v])} 个有效映射") # 🔑 第五步:遍历 HTML 表格,使用映射关系查找 for row_idx, row in enumerate(html_rows): group_indices = row_mapping.get(row_idx, []) if not group_indices: continue # 合并多个组的 boxes current_boxes = [] for group_idx in group_indices: if group_idx < len(grouped_boxes): current_boxes.extend(grouped_boxes[group_idx]['boxes']) current_boxes.sort(key=lambda x: x['bbox'][0]) # 🎯 关键改进:提取 HTML 单元格并预先确定列边界 html_cells = row.find_all(['td', 'th']) if not html_cells: continue # 🔑 预估列边界(基于 x 坐标分布) col_boundaries = self._estimate_column_boundaries( current_boxes, len(html_cells) ) print(f" 行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}") # 🔑 按列分配 boxes for col_idx, cell in enumerate(html_cells): cell_text = cell.get_text(strip=True) if not cell_text: continue # 🎯 获取该列范围内的所有 boxes col_boxes = self._get_boxes_in_column( current_boxes, col_boundaries, col_idx ) if not col_boxes: continue # 🎯 尝试匹配并合并 matched_result = self._match_and_merge_boxes_for_cell( cell_text, col_boxes ) if matched_result: merged_bbox = matched_result['bbox'] merged_text = matched_result['text'] cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]" cell['data-score'] = f"{matched_result['score']:.4f}" cell['data-paddle-indices'] = str(matched_result['paddle_indices']) cells.append({ 'type': 'table_cell', 'text': cell_text, 'matched_text': merged_text, 'bbox': merged_bbox, 'row': row_idx + 1, 'col': col_idx + 1, 'score': matched_result['score'], 'paddle_bbox_indices': matched_result['paddle_indices'] }) # 标记已使用 for box in matched_result['used_boxes']: box['used'] = True # 计算新的指针位置 used_count = sum(1 for box in table_region_boxes if box.get('used')) new_pointer = start_pointer + used_count print(f" 匹配: {len(cells)} 个单元格") return str(soup), cells, new_pointer def _estimate_column_boundaries(self, boxes: List[Dict], num_cols: int) -> List[Tuple[int, int]]: """ 估算列边界(改进版:处理同列多文本框) Args: boxes: 当前行的所有 boxes(已按 x 排序) num_cols: HTML 表格的列数 Returns: 列边界列表 [(x_start, x_end), ...] """ if not boxes: return [] # 🔑 关键改进:先按 x 坐标聚类(合并同列的多个文本框) x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=10) print(f" X聚类: {len(boxes)} 个boxes -> {len(x_clusters)} 个列簇") # 获取所有 x 坐标范围 x_min = min(cluster['x_min'] for cluster in x_clusters) x_max = max(cluster['x_max'] for cluster in x_clusters) # 🎯 策略 1: 如果聚类数量与列数接近 if len(x_clusters) == num_cols: # 直接使用聚类边界 boundaries = [(cluster['x_min'], cluster['x_max']) for cluster in x_clusters] return boundaries # # 🎯 策略 2: 聚类数少于列数(某些列没有文本) # if len(x_clusters) < num_cols: # print(f" ⚠️ 聚类数 {len(x_clusters)} < 列数 {num_cols},使用均分策略") # # 使用聚类的间隙来推断缺失的列边界 # cluster_centers = [(c['x_min'] + c['x_max']) / 2 for c in x_clusters] # # 计算平均列宽 # if len(cluster_centers) > 1: # avg_gap = (x_max - x_min) / (num_cols - 1) # else: # avg_gap = 100 # 默认列宽 # # 生成边界 # boundaries = [] # prev_x = x_min # for i in range(num_cols): # if i < len(x_clusters): # # 使用实际聚类 # boundaries.append((x_clusters[i]['x_min'], x_clusters[i]['x_max'])) # prev_x = x_clusters[i]['x_max'] # else: # # 推断缺失列 # next_x = prev_x + avg_gap # boundaries.append((prev_x, next_x)) # prev_x = next_x # return boundaries # 🎯 策略 3: 聚类数多于列数(某些列有多个文本簇) if len(x_clusters) > num_cols: print(f" ℹ️ 聚类数 {len(x_clusters)} > 列数 {num_cols},合并相近簇") # 合并相近的簇 merged_clusters = self._merge_close_clusters(x_clusters, num_cols) boundaries = [(cluster['x_min'], cluster['x_max']) for cluster in merged_clusters] return boundaries return [] def _cluster_boxes_by_x(self, boxes: List[Dict], x_tolerance: int = 50) -> List[Dict]: """ 按 x 坐标聚类(合并同列的多个文本框) Args: boxes: 文本框列表 x_tolerance: X坐标容忍度 Returns: 聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...] """ if not boxes: return [] # 按左边界 x 坐标排序 sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0]) clusters = [] current_cluster = None for box in sorted_boxes: bbox = box['bbox'] x_start = bbox[0] x_end = bbox[2] if current_cluster is None: # 开始新簇 current_cluster = { 'x_min': x_start, 'x_max': x_end, 'boxes': [box] } else: # 检查是否属于当前簇 # 🔑 条件:x 坐标重叠或接近 overlap = not (x_start > current_cluster['x_max'] + x_tolerance or x_end < current_cluster['x_min'] - x_tolerance) if overlap or abs(x_start - current_cluster['x_min']) <= x_tolerance: # 合并到当前簇 current_cluster['boxes'].append(box) current_cluster['x_min'] = min(current_cluster['x_min'], x_start) current_cluster['x_max'] = max(current_cluster['x_max'], x_end) else: # 保存当前簇,开始新簇 clusters.append(current_cluster) current_cluster = { 'x_min': x_start, 'x_max': x_end, 'boxes': [box] } # 添加最后一簇 if current_cluster: clusters.append(current_cluster) return clusters def _merge_close_clusters(self, clusters: List[Dict], target_count: int) -> List[Dict]: """ 合并相近的簇,直到数量等于目标列数 Args: clusters: 聚类列表 target_count: 目标列数 Returns: 合并后的聚类列表 """ if len(clusters) <= target_count: return clusters # 复制一份,避免修改原数据 working_clusters = [c.copy() for c in clusters] while len(working_clusters) > target_count: # 找到距离最近的两个簇 min_distance = float('inf') merge_idx = 0 for i in range(len(working_clusters) - 1): distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max'] if distance < min_distance: min_distance = distance merge_idx = i # 合并 cluster1 = working_clusters[merge_idx] cluster2 = working_clusters[merge_idx + 1] merged_cluster = { 'x_min': cluster1['x_min'], 'x_max': cluster2['x_max'], 'boxes': cluster1['boxes'] + cluster2['boxes'] } # 替换 working_clusters[merge_idx] = merged_cluster working_clusters.pop(merge_idx + 1) return working_clusters def _get_boxes_in_column(self, boxes: List[Dict], boundaries: List[Tuple[int, int]], col_idx: int) -> List[Dict]: """ 获取指定列范围内的 boxes(改进版:包含重叠) Args: boxes: 当前行的所有 boxes boundaries: 列边界 col_idx: 列索引 Returns: 该列的 boxes """ if col_idx >= len(boundaries): return [] x_start, x_end = boundaries[col_idx] col_boxes = [] for box in boxes: bbox = box['bbox'] box_x_start = bbox[0] box_x_end = bbox[2] # 🔑 改进:检查是否有重叠(不只是中心点) overlap = not (box_x_start > x_end or box_x_end < x_start) if overlap: col_boxes.append(box) return col_boxes def _match_and_merge_boxes_for_cell(self, cell_text: str, col_boxes: List[Dict]) -> Optional[Dict]: """ 匹配并合并单元格的多个 boxes 策略: 1. 尝试单个 box 精确匹配 2. 如果失败,尝试合并多个 boxes Args: cell_text: HTML 单元格文本 col_boxes: 该列的候选 boxes Returns: {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float, 'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2]} """ from fuzzywuzzy import fuzz cell_text_normalized = self.text_matcher.normalize_text(cell_text) if len(cell_text_normalized) < 2: return None # 🔑 策略 1: 单个 box 精确匹配 for box in col_boxes: if box.get('used'): continue box_text = self.text_matcher.normalize_text(box['text']) if cell_text_normalized == box_text: return { 'bbox': box['bbox'], 'text': box['text'], 'score': box['score'], 'paddle_indices': [box['paddle_bbox_index']], 'used_boxes': [box] } # 🔑 策略 2: 多个 boxes 合并匹配 unused_boxes = [b for b in col_boxes if not b.get('used')] if not unused_boxes: return None # 尝试不同的组合长度 for combo_len in range(1, min(len(unused_boxes) + 1, 5)): # 按 y 坐标排序(上到下) sorted_boxes = sorted(unused_boxes, key=lambda b: b['bbox'][1]) # 滑动窗口 for start_idx in range(len(sorted_boxes) - combo_len + 1): combo_boxes = sorted_boxes[start_idx:start_idx + combo_len] # 合并文本 merged_text = ''.join([b['text'] for b in combo_boxes]) merged_text_normalized = self.text_matcher.normalize_text(merged_text) # 计算相似度 similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized) if similarity >= 85: # 高阈值 # 合并 bbox merged_bbox = [ min(b['bbox'][0] for b in combo_boxes), min(b['bbox'][1] for b in combo_boxes), max(b['bbox'][2] for b in combo_boxes), max(b['bbox'][3] for b in combo_boxes) ] return { 'bbox': merged_bbox, 'text': merged_text, 'score': sum(b['score'] for b in combo_boxes) / len(combo_boxes), 'paddle_indices': [b['paddle_bbox_index'] for b in combo_boxes], 'used_boxes': combo_boxes } # 🔑 策略 3: 降级匹配(单个最佳) best_box = None best_score = 0 for box in unused_boxes: box_text = self.text_matcher.normalize_text(box['text']) score = fuzz.partial_ratio(cell_text_normalized, box_text) if score > best_score: best_score = score best_box = box if best_box and best_score >= 70: return { 'bbox': best_box['bbox'], 'text': best_box['text'], 'score': best_box['score'], 'paddle_indices': [best_box['paddle_bbox_index']], 'used_boxes': [best_box] } return None def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict], table_bbox: Optional[List[int]], html: str) -> Tuple[List[Dict], List[int]]: """ 筛选表格区域内的 paddle boxes 策略: 1. 如果有 table_bbox,使用边界框筛选(扩展边界) 2. 如果没有 table_bbox,通过内容匹配推断区域 Args: paddle_boxes: paddle OCR 结果 table_bbox: 表格边界框 [x1, y1, x2, y2] html: HTML 内容(用于内容验证) Returns: (筛选后的 boxes, 实际表格边界框) """ if not paddle_boxes: return [], [0, 0, 0, 0] # 🎯 策略 1: 使用提供的 table_bbox(扩展边界) if table_bbox and len(table_bbox) == 4: x1, y1, x2, y2 = table_bbox # 扩展边界(考虑边框外的文本) margin = 20 expanded_bbox = [ max(0, x1 - margin), max(0, y1 - margin), x2 + margin, y2 + margin ] filtered = [] for box in paddle_boxes: bbox = box['bbox'] box_center_x = (bbox[0] + bbox[2]) / 2 box_center_y = (bbox[1] + bbox[3]) / 2 # 中心点在扩展区域内 if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and expanded_bbox[1] <= box_center_y <= expanded_bbox[3]): filtered.append(box) if filtered: # 计算实际边界框 actual_bbox = [ min(b['bbox'][0] for b in filtered), min(b['bbox'][1] for b in filtered), max(b['bbox'][2] for b in filtered), max(b['bbox'][3] for b in filtered) ] return filtered, actual_bbox # 🎯 策略 2: 通过内容匹配推断区域 print(" ℹ️ 无 table_bbox,使用内容匹配推断表格区域...") # 提取 HTML 中的所有文本 from bs4 import BeautifulSoup soup = BeautifulSoup(html, 'html.parser') html_texts = set() for cell in soup.find_all(['td', 'th']): text = cell.get_text(strip=True) if text: html_texts.add(self.text_matcher.normalize_text(text)) if not html_texts: return [], [0, 0, 0, 0] # 找出与 HTML 内容匹配的 boxes matched_boxes = [] for box in paddle_boxes: normalized_text = self.text_matcher.normalize_text(box['text']) # 检查是否匹配 if any(normalized_text in ht or ht in normalized_text for ht in html_texts): matched_boxes.append(box) if not matched_boxes: # 🔑 降级:如果精确匹配失败,使用模糊匹配 print(" ℹ️ 精确匹配失败,尝试模糊匹配...") from fuzzywuzzy import fuzz for box in paddle_boxes: normalized_text = self.text_matcher.normalize_text(box['text']) for ht in html_texts: similarity = fuzz.partial_ratio(normalized_text, ht) if similarity >= 70: # 降低阈值 matched_boxes.append(box) break if matched_boxes: # 计算边界框 actual_bbox = [ min(b['bbox'][0] for b in matched_boxes), min(b['bbox'][1] for b in matched_boxes), max(b['bbox'][2] for b in matched_boxes), max(b['bbox'][3] for b in matched_boxes) ] # 🔑 扩展边界,包含可能遗漏的文本 margin = 30 expanded_bbox = [ max(0, actual_bbox[0] - margin), max(0, actual_bbox[1] - margin), actual_bbox[2] + margin, actual_bbox[3] + margin ] # 重新筛选(包含边界上的文本) final_filtered = [] for box in paddle_boxes: bbox = box['bbox'] box_center_x = (bbox[0] + bbox[2]) / 2 box_center_y = (bbox[1] + bbox[3]) / 2 if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and expanded_bbox[1] <= box_center_y <= expanded_bbox[3]): final_filtered.append(box) return final_filtered, actual_bbox # 🔑 最后的降级:返回所有 boxes print(" ⚠️ 无法确定表格区域,使用所有 paddle boxes") if paddle_boxes: actual_bbox = [ min(b['bbox'][0] for b in paddle_boxes), min(b['bbox'][1] for b in paddle_boxes), max(b['bbox'][2] for b in paddle_boxes), max(b['bbox'][3] for b in paddle_boxes) ] return paddle_boxes, actual_bbox return [], [0, 0, 0, 0] def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict], y_tolerance: int = 20) -> List[Dict]: """ 将 paddle_text_boxes 按 y 坐标分组(聚类) Args: paddle_boxes: Paddle OCR 文字框列表 y_tolerance: Y 坐标容忍度(像素) Returns: 分组列表,每组包含 {'y_center': float, 'boxes': List[Dict]} """ if not paddle_boxes: return [] # 计算每个 box 的中心 y 坐标 boxes_with_y = [] for box in paddle_boxes: bbox = box['bbox'] y_center = (bbox[1] + bbox[3]) / 2 boxes_with_y.append({ 'y_center': y_center, 'box': box }) # 按 y 坐标排序 boxes_with_y.sort(key=lambda x: x['y_center']) # 聚类 groups = [] current_group = None for item in boxes_with_y: if current_group is None: # 开始新组 current_group = { 'y_center': item['y_center'], 'boxes': [item['box']] } else: # 检查是否属于当前组 if abs(item['y_center'] - current_group['y_center']) <= y_tolerance: current_group['boxes'].append(item['box']) # 更新组的中心(使用平均值) current_group['y_center'] = sum( b['bbox'][1] + b['bbox'][3] for b in current_group['boxes'] ) / (2 * len(current_group['boxes'])) else: # 保存当前组,开始新组 groups.append(current_group) current_group = { 'y_center': item['y_center'], 'boxes': [item['box']] } # 添加最后一组 if current_group: groups.append(current_group) return groups def _find_best_match_in_group(self, target_text: str, boxes: List[Dict], start_idx: int = 0) -> Optional[Dict]: """ 在给定的 boxes 列表中查找最佳匹配(已按 x 坐标排序) Args: target_text: 目标文本 boxes: 候选 boxes(已排序) start_idx: 起始索引 Returns: 最佳匹配的 box 或 None """ target_text = self.text_matcher.normalize_text(target_text) if len(target_text) < 2: return None best_match = None best_score = 0 # 优先从 start_idx 开始查找 search_range = list(range(start_idx, len(boxes))) + list(range(0, start_idx)) for idx in search_range: box = boxes[idx] if box.get('used'): continue box_text = self.text_matcher.normalize_text(box['text']) # 精确匹配 if target_text == box_text: return box # 长度比例检查 length_ratio = min(len(target_text), len(box_text)) / max(len(target_text), len(box_text)) if length_ratio < 0.35: continue # 子串检查 shorter = target_text if len(target_text) < len(box_text) else box_text longer = box_text if len(target_text) < len(box_text) else target_text is_substring = shorter in longer # 计算相似度 from fuzzywuzzy import fuzz partial_ratio = fuzz.partial_ratio(target_text, box_text) if is_substring: partial_ratio += 10 if partial_ratio >= self.text_matcher.similarity_threshold: if partial_ratio > best_score: best_score = partial_ratio best_match = box return best_match def _match_html_rows_to_paddle_groups(self, html_rows: List, grouped_boxes: List[Dict]) -> Dict[int, List[int]]: """ 智能匹配 HTML 行与 paddle 分组(改进版:处理跨行文本) 策略: 1. 第一遍:基于内容精确匹配 2. 第二遍:将未使用的组合并到相邻已匹配的行 """ if not html_rows or not grouped_boxes: return {} mapping = {} # 🎯 策略 1: 数量相等,简单 1:1 映射 if len(html_rows) == len(grouped_boxes): for i in range(len(html_rows)): mapping[i] = [i] return mapping # 🎯 策略 2: 第一遍 - 基于内容精确匹配 used_groups = set() for row_idx, row in enumerate(html_rows): row_texts = [cell.get_text(strip=True) for cell in row.find_all(['td', 'th'])] row_texts = [t for t in row_texts if t] if not row_texts: mapping[row_idx] = [] continue row_text_normalized = [self.text_matcher.normalize_text(t) for t in row_texts] # 查找最匹配的 paddle 组 best_groups = [] best_score = 0 # 尝试匹配单个组 for group_idx, group in enumerate(grouped_boxes): if group_idx in used_groups: continue group_texts = [self.text_matcher.normalize_text(b['text']) for b in group['boxes'] if not b.get('used')] match_count = sum(1 for rt in row_text_normalized if any(rt in gt or gt in rt for gt in group_texts)) coverage = match_count / len(row_texts) if row_texts else 0 if coverage > best_score: best_score = coverage best_groups = [group_idx] # 🔑 如果单组匹配度不高,尝试匹配多个连续组 if best_score < 0.5: # 从当前位置向后查找 start_group = min([g for g in range(len(grouped_boxes)) if g not in used_groups], default=0) combined_texts = [] combined_groups = [] for group_idx in range(start_group, min(start_group + 5, len(grouped_boxes))): if group_idx in used_groups: continue combined_groups.append(group_idx) combined_texts.extend([ self.text_matcher.normalize_text(b['text']) for b in grouped_boxes[group_idx]['boxes'] if not b.get('used') ]) match_count = sum(1 for rt in row_text_normalized if any(rt in gt or gt in rt for gt in combined_texts)) coverage = match_count / len(row_texts) if row_texts else 0 if coverage > best_score: best_score = coverage best_groups = combined_groups.copy() # 记录映射 if best_groups and best_score > 0.3: mapping[row_idx] = best_groups used_groups.update(best_groups) else: # 降级策略:位置推测 estimated_group = min(row_idx, len(grouped_boxes) - 1) if estimated_group not in used_groups: mapping[row_idx] = [estimated_group] used_groups.add(estimated_group) else: mapping[row_idx] = [] # 🎯 策略 3: 第二遍 - 处理未使用的组(关键!) unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups] if unused_groups: print(f" ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}") # 🔑 将未使用的组合并到相邻的已匹配行 for unused_idx in unused_groups: # 策略:合并到最近的上方或下方已匹配行 # 1. 查找该组的 y 坐标 unused_y = grouped_boxes[unused_idx]['y_center'] # 2. 找到最近的已使用组 closest_used_idx = None min_distance = float('inf') for used_idx in sorted(used_groups): distance = abs(grouped_boxes[used_idx]['y_center'] - unused_y) if distance < min_distance: min_distance = distance closest_used_idx = used_idx if closest_used_idx is not None: # 3. 找到该组对应的 HTML 行 target_html_row = None for html_row_idx, group_indices in mapping.items(): if closest_used_idx in group_indices: target_html_row = html_row_idx break if target_html_row is not None: # 4. 判断合并方向(基于 y 坐标) if unused_y < grouped_boxes[closest_used_idx]['y_center']: # 未使用组在上方,可能是上一行的跨列文本 if target_html_row > 0: # 合并到上一行 if target_html_row - 1 in mapping: if unused_idx not in mapping[target_html_row - 1]: mapping[target_html_row - 1].append(unused_idx) print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row - 1}(上方)") else: # 合并到当前行 if unused_idx not in mapping[target_html_row]: mapping[target_html_row].append(unused_idx) print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(当前)") else: # 未使用组在下方,可能是当前行的跨列文本 if unused_idx not in mapping[target_html_row]: mapping[target_html_row].append(unused_idx) print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(下方)") used_groups.add(unused_idx) # 🔑 策略 4: 第三遍 - 按 y 坐标排序每行的组索引 for row_idx in mapping: if mapping[row_idx]: mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center']) return mapping