Browse Source

feat: add enhanced table cell matcher for PaddleOCR integration

- Implemented TableCellMatcher class to match HTML table cells with PaddleOCR bounding boxes.
- Added methods for filtering, grouping, and matching text boxes within table regions.
- Enhanced column boundary estimation and box clustering for improved accuracy.
- Introduced sequential matching strategy for cells, allowing for better handling of overlapping text.
- Included detailed logging for debugging and tracking matching processes.
zhch158_admin 3 days ago
parent
commit
32378311cf
3 changed files with 2829 additions and 0 deletions:
  1. merger/table_cell_matcher_v2.py (+970, −0)
  2. merger/table_cell_matcher_v3.py (+906, −0)
  3. merger/table_cell_matcher_v4.py (+953, −0)

merger/table_cell_matcher_v2.py (+970, −0)
@@ -0,0 +1,970 @@
+"""
+表格单元格匹配器
+负责将 HTML 表格单元格与 PaddleOCR bbox 进行匹配
+"""
+from typing import List, Dict, Tuple, Optional
+from bs4 import BeautifulSoup
+import numpy as np
+
+try:
+    from .text_matcher import TextMatcher
+    from .bbox_extractor import BBoxExtractor
+except ImportError:
+    from text_matcher import TextMatcher
+    from bbox_extractor import BBoxExtractor
+
+class TableCellMatcher:
+    """表格单元格匹配器"""
+    
+    def __init__(self, text_matcher: TextMatcher, 
+                 x_tolerance: int = 3, 
+                 y_tolerance: int = 10):
+        """
+        Args:
+            text_matcher: 文本匹配器
+            x_tolerance: X轴容差(用于列边界判断)
+            y_tolerance: Y轴容差(用于行分组)
+        """
+        self.text_matcher = text_matcher
+        self.x_tolerance = x_tolerance
+        self.y_tolerance = y_tolerance
+    
    def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                  start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
        """
        Attach bbox information to an HTML table (optimised: pre-filter the table region).

        Strategy:
        1. Filter paddle_text_boxes down to those inside table_bbox
        2. Group the filtered boxes into visual rows
        3. Smart-match HTML rows against the paddle row groups
        4. Look up cells only inside the matched groups

        Args:
            html: HTML table markup
            paddle_text_boxes: full paddle OCR result list
            start_pointer: index into paddle_text_boxes to start from
            table_bbox: table bounding box [x1, y1, x2, y2] (optional)

        Returns:
            (enhanced HTML string, matched cell records, new pointer position)
        """
        soup = BeautifulSoup(html, 'html.parser')
        cells = []
        
        # Step 1: keep only the paddle boxes that fall inside the table region.
        table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
            paddle_text_boxes[start_pointer:],
            table_bbox,
            html
        )
        
        if not table_region_boxes:
            print(f"⚠️ 未在表格区域找到 paddle boxes")
            return str(soup), cells, start_pointer
        
        print(f"📊 表格区域: {len(table_region_boxes)} 个文本框")
        print(f"   边界: {actual_table_bbox}")
        
        # Step 2: group the region's boxes into visual rows.
        grouped_boxes = self._group_paddle_boxes_by_rows(
            table_region_boxes,
            y_tolerance=self.y_tolerance,
            auto_correct_skew=True
        )
        
        # Step 3: sort each group's boxes left-to-right, and the groups top-to-bottom.
        for group in grouped_boxes:
            group['boxes'].sort(key=lambda x: x['bbox'][0])
        
        grouped_boxes.sort(key=lambda g: g['y_center'])
        
        print(f"   分组: {len(grouped_boxes)} 行")
        
        # Step 4: smart-match HTML rows to paddle row groups.
        html_rows = soup.find_all('tr')
        row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
        
        print(f"   HTML行: {len(html_rows)} 行")
        print(f"   映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
        
        # Step 5: walk the HTML rows, using the mapping to locate candidate boxes.
        for row_idx, row in enumerate(html_rows):
            group_indices = row_mapping.get(row_idx, [])
            
            if not group_indices:
                continue
            
            # Merge the boxes of every group mapped to this HTML row.
            current_boxes = []
            for group_idx in group_indices:
                if group_idx < len(grouped_boxes):
                    current_boxes.extend(grouped_boxes[group_idx]['boxes'])
            
            current_boxes.sort(key=lambda x: x['bbox'][0])
            
            # Extract the HTML cells and pre-compute column boundaries.
            html_cells = row.find_all(['td', 'th'])
            
            if not html_cells:
                continue
            
            # Estimate column boundaries from the x distribution of the boxes.
            col_boundaries = self._estimate_column_boundaries(
                current_boxes, 
                len(html_cells)
            )
            
            print(f"   行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
            
            # Sequential pointer matching: cells and boxes are both ordered
            # left-to-right, so each successful match advances the pointer.
            box_pointer = 0  # pointer into current_boxes for this row
            
            for col_idx, cell in enumerate(html_cells):
                cell_text = cell.get_text(strip=True)
                
                if not cell_text:
                    continue
                
                # Match starting from the current pointer position.
                matched_result = self._match_cell_sequential(
                    cell_text,
                    current_boxes,
                    col_boundaries,
                    box_pointer
                )
                
                if matched_result:
                    merged_bbox = matched_result['bbox']
                    merged_text = matched_result['text']
                    
                    cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
                    cell['data-score'] = f"{matched_result['score']:.4f}"
                    cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
                    
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'matched_text': merged_text,
                        'bbox': merged_bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_result['score'],
                        'paddle_bbox_indices': matched_result['paddle_indices']
                    })
                    
                    # Mark the consumed boxes so later cells skip them.
                    for box in matched_result['used_boxes']:
                        box['used'] = True
                    
                    # Advance the pointer past the last consumed box.
                    box_pointer = matched_result['last_used_index'] + 1
                    
                    print(f"      列 {col_idx + 1}: '{cell_text[:20]}...' 匹配 {len(matched_result['used_boxes'])} 个box (指针: {box_pointer})")
        
        # Advance the global pointer by the number of boxes consumed in this table.
        used_count = sum(1 for box in table_region_boxes if box.get('used'))
        new_pointer = start_pointer + used_count
        
        print(f"   匹配: {len(cells)} 个单元格")
        
        return str(soup), cells, new_pointer
+
+
+    def _estimate_column_boundaries(self, boxes: List[Dict], 
+                                    num_cols: int) -> List[Tuple[int, int]]:
+        """
+        估算列边界(改进版:处理同列多文本框)
+        
+        Args:
+            boxes: 当前行的所有 boxes(已按 x 排序)
+            num_cols: HTML 表格的列数
+        
+        Returns:
+            列边界列表 [(x_start, x_end), ...]
+        """
+        if not boxes:
+            return []
+        
+        # 🔑 关键改进:先按 x 坐标聚类(合并同列的多个文本框)
+        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
+        
+        print(f"      X聚类: {len(boxes)} 个boxes -> {len(x_clusters)} 个列簇")
+        
+        # 获取所有 x 坐标范围
+        x_min = min(cluster['x_min'] for cluster in x_clusters)
+        x_max = max(cluster['x_max'] for cluster in x_clusters)
+        
+        # 🎯 策略 1: 如果聚类数量<=列数接近
+        if len(x_clusters) <= num_cols:
+            # 直接使用聚类边界
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in x_clusters]
+            return boundaries
+        
+        # 🎯 策略 2: 聚类数多于列数(某些列有多个文本簇)
+        if len(x_clusters) > num_cols:
+            print(f"      ℹ️ 聚类数 {len(x_clusters)} > 列数 {num_cols},合并相近簇")
+            
+            # 合并相近的簇
+            merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
+            
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in merged_clusters]
+            return boundaries
+        
+        return []
+
+
+    def _cluster_boxes_by_x(self, boxes: List[Dict], 
+                    x_tolerance: int = 3) -> List[Dict]:
+        """
+        按 x 坐标聚类(合并同列的多个文本框)
+        
+        Args:
+            boxes: 文本框列表
+            x_tolerance: X坐标容忍度
+        
+        Returns:
+            聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
+        """
+        if not boxes:
+            return []
+        
+        # 按左边界 x 坐标排序
+        sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
+        
+        clusters = []
+        current_cluster = None
+        
+        for box in sorted_boxes:
+            bbox = box['bbox']
+            x_start = bbox[0]
+            x_end = bbox[2]
+            
+            if current_cluster is None:
+                # 开始新簇
+                current_cluster = {
+                    'x_min': x_start,
+                    'x_max': x_end,
+                    'boxes': [box]
+                }
+            else:
+                # 🔑 检查是否属于当前簇(修正后的逻辑)
+                # 1. x 坐标有重叠:x_start <= current_x_max 且 x_end >= current_x_min
+                # 2. 或者距离在容忍度内
+            
+                has_overlap = (x_start <= current_cluster['x_max'] and 
+                              x_end >= current_cluster['x_min'])
+            
+                is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
+            
+                if has_overlap or is_close:
+                    # 合并到当前簇
+                    current_cluster['boxes'].append(box)
+                    current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
+                    current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
+                else:
+                    # 保存当前簇,开始新簇
+                    clusters.append(current_cluster)
+                    current_cluster = {
+                        'x_min': x_start,
+                        'x_max': x_end,
+                        'boxes': [box]
+                    }
+    
+        # 添加最后一簇
+        if current_cluster:
+            clusters.append(current_cluster)
+        
+        return clusters
+
+
+    def _merge_close_clusters(self, clusters: List[Dict], 
+                            target_count: int) -> List[Dict]:
+        """
+        合并相近的簇,直到数量等于目标列数
+        
+        Args:
+            clusters: 聚类列表
+            target_count: 目标列数
+        
+        Returns:
+            合并后的聚类列表
+        """
+        if len(clusters) <= target_count:
+            return clusters
+        
+        # 复制一份,避免修改原数据
+        working_clusters = [c.copy() for c in clusters]
+        
+        while len(working_clusters) > target_count:
+            # 找到距离最近的两个簇
+            min_distance = float('inf')
+            merge_idx = 0
+            
+            for i in range(len(working_clusters) - 1):
+                distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
+                if distance < min_distance:
+                    min_distance = distance
+                    merge_idx = i
+            
+            # 合并
+            cluster1 = working_clusters[merge_idx]
+            cluster2 = working_clusters[merge_idx + 1]
+            
+            merged_cluster = {
+                'x_min': cluster1['x_min'],
+                'x_max': cluster2['x_max'],
+                'boxes': cluster1['boxes'] + cluster2['boxes']
+            }
+            
+            # 替换
+            working_clusters[merge_idx] = merged_cluster
+            working_clusters.pop(merge_idx + 1)
+        
+        return working_clusters
+
+
+    def _get_boxes_in_column(self, boxes: List[Dict], 
+                            boundaries: List[Tuple[int, int]],
+                            col_idx: int) -> List[Dict]:
+        """
+        获取指定列范围内的 boxes(改进版:包含重叠)
+        
+        Args:
+            boxes: 当前行的所有 boxes
+            boundaries: 列边界
+            col_idx: 列索引
+        
+        Returns:
+            该列的 boxes
+        """
+        if col_idx >= len(boundaries):
+            return []
+        
+        x_start, x_end = boundaries[col_idx]
+        
+        col_boxes = []
+        for box in boxes:
+            bbox = box['bbox']
+            box_x_start = bbox[0]
+            box_x_end = bbox[2]
+            
+            # 🔑 改进:检查是否有重叠(不只是中心点)
+            overlap = not (box_x_start > x_end or box_x_end < x_start)
+            
+            if overlap:
+                col_boxes.append(box)
+        
+        return col_boxes
+
+
    def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
                                  table_bbox: Optional[List[int]],
                                  html: str) -> Tuple[List[Dict], List[int]]:
        """
        Filter the paddle boxes down to those belonging to the table region.

        Strategy:
        1. When table_bbox is given, filter by the (margin-expanded) box
        2. Otherwise infer the region by matching box text against the HTML

        Args:
            paddle_boxes: paddle OCR boxes
            table_bbox: table bounding box [x1, y1, x2, y2], if known
            html: HTML content (used for content-based inference)

        Returns:
            (filtered boxes, actual table bounding box)
        """
        if not paddle_boxes:
            return [], [0, 0, 0, 0]
        
        # Strategy 1: use the provided table_bbox, expanded by a margin.
        if table_bbox and len(table_bbox) == 4:
            x1, y1, x2, y2 = table_bbox
            
            # Expand the region to catch text sitting just outside the border.
            margin = 20
            expanded_bbox = [
                max(0, x1 - margin),
                max(0, y1 - margin),
                x2 + margin,
                y2 + margin
            ]
            
            filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                # Keep boxes whose centre lies inside the expanded region.
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    filtered.append(box)
            
            if filtered:
                # Tight bounding box of the boxes actually kept.
                actual_bbox = [
                    min(b['bbox'][0] for b in filtered),
                    min(b['bbox'][1] for b in filtered),
                    max(b['bbox'][2] for b in filtered),
                    max(b['bbox'][3] for b in filtered)
                ]
                return filtered, actual_bbox
        
        # Strategy 2: infer the region via content matching.
        print("   ℹ️ 无 table_bbox,使用内容匹配推断表格区域...")
        
        # Collect all (normalised) cell texts from the HTML.
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, 'html.parser')
        html_texts = set()
        for cell in soup.find_all(['td', 'th']):
            text = cell.get_text(strip=True)
            if text:
                html_texts.add(self.text_matcher.normalize_text(text))
        
        if not html_texts:
            return [], [0, 0, 0, 0]
        
        # Find boxes whose text matches some HTML cell text (substring either way).
        matched_boxes = []
        for box in paddle_boxes:
            normalized_text = self.text_matcher.normalize_text(box['text'])
            
            if any(normalized_text in ht or ht in normalized_text 
                   for ht in html_texts):
                matched_boxes.append(box)
        
        if not matched_boxes:
            # Fallback: exact matching failed, retry with fuzzy matching.
            print("   ℹ️ 精确匹配失败,尝试模糊匹配...")
            
            from fuzzywuzzy import fuzz
            for box in paddle_boxes:
                normalized_text = self.text_matcher.normalize_text(box['text'])
                
                for ht in html_texts:
                    similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # lowered threshold for the fallback
                        matched_boxes.append(box)
                        break
    
        if matched_boxes:
            # Bounding box of the matched boxes (returned un-expanded).
            actual_bbox = [
                min(b['bbox'][0] for b in matched_boxes),
                min(b['bbox'][1] for b in matched_boxes),
                max(b['bbox'][2] for b in matched_boxes),
                max(b['bbox'][3] for b in matched_boxes)
            ]
            
            # Expand the boundary to include text that may have been missed.
            margin = 30
            expanded_bbox = [
                max(0, actual_bbox[0] - margin),
                max(0, actual_bbox[1] - margin),
                actual_bbox[2] + margin,
                actual_bbox[3] + margin
            ]
            
            # Re-filter so boxes sitting on the boundary are included as well.
            final_filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    final_filtered.append(box)
            
            return final_filtered, actual_bbox
        
        # Last resort: localisation failed, fall back to every box.
        print("   ⚠️ 无法确定表格区域,使用所有 paddle boxes")
        if paddle_boxes:
            actual_bbox = [
                min(b['bbox'][0] for b in paddle_boxes),
                min(b['bbox'][1] for b in paddle_boxes),
                max(b['bbox'][2] for b in paddle_boxes),
                max(b['bbox'][3] for b in paddle_boxes)
            ]
            return paddle_boxes, actual_bbox
        
        return [], [0, 0, 0, 0]
+
+    def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict], 
+                                    y_tolerance: int = 10,
+                                    auto_correct_skew: bool = True) -> List[Dict]:
+        """
+        将 paddle_text_boxes 按 y 坐标分组(聚类)- 增强版本
+    
+        Args:
+            paddle_boxes: Paddle OCR 文字框列表
+            y_tolerance: Y 坐标容忍度(像素)
+            auto_correct_skew: 是否自动校正倾斜
+    
+        Returns:
+            分组列表,每组包含 {'y_center': float, 'boxes': List[Dict]}
+        """
+        if not paddle_boxes:
+            return []
+        
+        # 🎯 步骤 1: 检测并校正倾斜(使用 BBoxExtractor)
+        if auto_correct_skew:
+            rotation_angle = BBoxExtractor.calculate_skew_angle(paddle_boxes)
+            
+            if abs(rotation_angle) > 0.5:
+                max_x = max(box['bbox'][2] for box in paddle_boxes)
+                max_y = max(box['bbox'][3] for box in paddle_boxes)
+                image_size = (max_x, max_y)
+                
+                print(f"   🔧 校正倾斜角度: {rotation_angle:.2f}°")
+                paddle_boxes = BBoxExtractor.correct_boxes_skew(
+                    paddle_boxes, -rotation_angle, image_size
+                )
+        
+        # 🎯 步骤 2: 按校正后的 y 坐标分组
+        boxes_with_y = []
+        for box in paddle_boxes:
+            bbox = box['bbox']
+            y_center = (bbox[1] + bbox[3]) / 2
+            boxes_with_y.append({
+                'y_center': y_center,
+                'box': box
+            })
+        
+        # 按 y 坐标排序
+        boxes_with_y.sort(key=lambda x: x['y_center'])
+        
+        groups = []
+        current_group = None
+        
+        for item in boxes_with_y:
+            if current_group is None:
+                # 开始新组
+                current_group = {
+                    'y_center': item['y_center'],
+                    'boxes': [item['box']]
+                }
+            else:
+                if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
+                    current_group['boxes'].append(item['box'])
+                    # 更新组的中心
+                    current_group['y_center'] = sum(
+                        (b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
+                    ) / len(current_group['boxes'])
+                else:
+                    groups.append(current_group)
+                    current_group = {
+                        'y_center': item['y_center'],
+                        'boxes': [item['box']]
+                    }
+        
+        if current_group:
+            groups.append(current_group)
+        
+        print(f"   ✓ 分组完成: {len(groups)} 行")
+        
+        return groups
+
+
    def _match_html_rows_to_paddle_groups(self, html_rows: List, 
                                        grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
        """
        Smart-match HTML table rows to paddle row groups.

        Supports skipping irrelevant groups (titles, noise) and merging
        several consecutive groups into one HTML row, with penalties that
        keep the matching from becoming greedy.

        Args:
            html_rows: the table's <tr> elements
            grouped_boxes: row groups from _group_paddle_boxes_by_rows

        Returns:
            Mapping of HTML row index -> list of matched group indices
            (empty list when a row could not be matched).
        """
        if not html_rows or not grouped_boxes:
            return {}
        
        mapping = {}
        
        # Strategy 1: equal counts -> trivial 1:1 mapping.
        if len(html_rows) == len(grouped_boxes):
            for i in range(len(html_rows)):
                mapping[i] = [i]
            return mapping
        
        # Strategy 2: content-based monotonic matching with a skip window.
        from fuzzywuzzy import fuzz
        used_groups = set()
        next_group_to_check = 0
        
        for row_idx, row in enumerate(html_rows):
            row_cells = row.find_all(['td', 'th'])
            row_texts = [cell.get_text(strip=True) for cell in row_cells]
            row_texts = [t for t in row_texts if t]
            
            # First cell text (usually the item name) is weighted extra below.
            row_header = row_texts[0] if row_texts else ""
            
            if not row_texts:
                mapping[row_idx] = []
                continue
            
            row_text_normalized = [self.text_matcher.normalize_text(t) for t in row_texts]
            row_combined_text = ''.join(row_text_normalized)
            
            best_groups = []
            best_score = 0
            
            # Dynamic skip window: the first row may skip many groups
            # (document titles above the table), later rows only a few (noise).
            max_skip = 15 if row_idx == 0 else 5
            
            # Try every allowed number of skipped groups.
            for skip in range(max_skip + 1):
                start_group = next_group_to_check + skip
                
                if start_group >= len(grouped_boxes):
                    break
                
                # Try merging 1..5 consecutive groups into this row.
                max_merge_window = 5
                
                for group_count in range(1, max_merge_window + 1):
                    end_group = start_group + group_count
                    if end_group > len(grouped_boxes):
                        break

                    combined_group_indices = list(range(start_group, end_group))
                    
                    # Collect all not-yet-used texts from the candidate groups.
                    combined_texts = []
                    
                    for g_idx in combined_group_indices:
                        group_boxes = grouped_boxes[g_idx].get('boxes', [])
                        for box in group_boxes:
                            if box.get('used'):
                                continue
                            normalized_text = self.text_matcher.normalize_text(box.get('text', ''))
                            if normalized_text:
                                combined_texts.append(normalized_text)

                    if not combined_texts:
                        continue
                    
                    paddle_combined_text = ''.join(combined_texts)
                    
                    # --- scoring ---
                    match_count = 0
                    
                    # 1. Cell coverage: fraction of HTML cells found in the group text.
                    for rt in row_text_normalized:
                        if len(rt) < 2: 
                            continue
                        if rt in paddle_combined_text:
                            match_count += 1
                            continue
                        for ct in combined_texts:
                            if fuzz.partial_ratio(rt, ct) >= 80:
                                match_count += 1
                                break
                    
                    coverage = match_count / len(row_texts) if row_texts else 0
                    
                    # 2. Whole-row similarity.
                    row_similarity = fuzz.partial_ratio(row_combined_text, paddle_combined_text) / 100.0
                    
                    # 3. Row-header match (weighted most heavily).
                    header_score = 0
                    if len(row_header) > 1:
                        if row_header in paddle_combined_text:
                            header_score = 1.0
                        else:
                            header_sim = fuzz.partial_ratio(row_header, paddle_combined_text)
                            if header_sim > 80:
                                header_score = 0.8
                    else:
                        header_score = 0.5
                    
                    final_score = (coverage * 0.3) + (row_similarity * 0.3) + (header_score * 0.4)
                    
                    # Penalties: prefer no skipping over skipping,
                    # and fewer merged groups over more.
                    merge_penalty = (group_count - 1) * 0.05
                    skip_penalty = skip * 0.02
                    
                    adjusted_score = final_score - merge_penalty - skip_penalty
                    
                    if adjusted_score > best_score:
                        best_score = adjusted_score
                        best_groups = combined_group_indices
                    
                    # Early stop: a single group matching this well will not
                    # improve by merging in more groups.
                    if group_count == 1 and final_score > 0.85:
                        break
                
                # If this skip level already yields an excellent match, do not
                # try larger skips (avoids jumping past the correct group to a
                # similar-looking later one).
                if best_score > 0.85:
                    break
            
            # Accept or reject the best candidate.
            if best_groups and best_score >= 0.4:
                mapping[row_idx] = best_groups
                used_groups.update(best_groups)
                next_group_to_check = max(best_groups) + 1
                print(f"   ✓ 行 {row_idx} ('{row_header[:10]}...'): 匹配组 {best_groups} (得分: {best_score:.2f})")
            else:
                mapping[row_idx] = []
                # Leave next_group_to_check unchanged so the next row still
                # gets a chance at these groups.
                print(f"   ✗ 行 {row_idx} ('{row_header[:10]}...'): 无匹配 (最佳得分: {best_score:.2f})")

        # Strategy 3: second pass — attach groups nobody claimed (important!).
        unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
        
        if unused_groups:
            print(f"   ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
            
            # Merge each unused group into the nearest matched neighbour row.
            for unused_idx in unused_groups:
                # Measure the vertical distance to the adjacent matched rows.
                unused_group = grouped_boxes[unused_idx]
                unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
                unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
                
                # Find the nearest used group above and below.
                above_idx = None
                below_idx = None
                above_distance = float('inf')
                below_distance = float('inf')
                
                # Search upwards.
                for i in range(unused_idx - 1, -1, -1):
                    if i in used_groups:
                        above_idx = i
                        # Boundary distance: unused group's top edge vs. the
                        # centre of the above group's bottom-most box.
                        above_group = grouped_boxes[i]
                        max_y_box = max(
                            above_group['boxes'],
                            key=lambda b: b['bbox'][3]
                        )
                        above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
                        above_distance = abs(unused_y_min - above_y_center)
                        print(f"      • 组 {unused_idx} 与上方组 {i} 距离: {above_distance:.1f}px")
                        break
                
                # Search downwards.
                for i in range(unused_idx + 1, len(grouped_boxes)):
                    if i in used_groups:
                        below_idx = i
                        # Boundary distance: unused group's bottom edge vs. the
                        # centre of the below group's top-most box.
                        below_group = grouped_boxes[i]
                        min_y_box = min(
                            below_group['boxes'],
                            key=lambda b: b['bbox'][1]
                        )
                        below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
                        below_distance = abs(below_y_center - unused_y_max)
                        print(f"      • 组 {unused_idx} 与下方组 {i} 距离: {below_distance:.1f}px")
                        break
                
                # Pick whichever side is closer.
                if above_idx is not None and below_idx is not None:
                    # Both exist: choose the nearer one.
                    if above_distance < below_distance:
                        closest_used_idx = above_idx
                        merge_direction = "上方"
                    else:
                        closest_used_idx = below_idx
                        merge_direction = "下方"
                    print(f"      ✓ 组 {unused_idx} 选择合并到{merge_direction}组 {closest_used_idx}")
                elif above_idx is not None:
                    closest_used_idx = above_idx
                    merge_direction = "上方"
                elif below_idx is not None:
                    closest_used_idx = below_idx
                    merge_direction = "下方"
                else:
                    print(f"      ⚠️ 组 {unused_idx} 无相邻已使用组,跳过")
                    continue
                
                # Find the HTML row the chosen neighbour group belongs to.
                target_html_row = None
                for html_row_idx, group_indices in mapping.items():
                    if closest_used_idx in group_indices:
                        target_html_row = html_row_idx
                        break
                
                if target_html_row is not None:
                    # Append the unused group to the target row's group list.
                    if merge_direction == "上方":
                        # Merge into the HTML row of the group above.
                        if target_html_row in mapping:
                            if unused_idx not in mapping[target_html_row]:
                                mapping[target_html_row].append(unused_idx)
                                print(f"      • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(上方行)")
                    else:
                        # Merge into the HTML row of the group below.
                        if target_html_row in mapping:
                            if unused_idx not in mapping[target_html_row]:
                                mapping[target_html_row].append(unused_idx)
                                print(f"      • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(下方行)")
                
                used_groups.add(unused_idx)
        
        # Strategy 4: third pass — sort each row's group indices by y.
        for row_idx in mapping:
            if mapping[row_idx]:
                mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
        
        return mapping
+
+    def _preprocess_close_groups(self, grouped_boxes: List[Dict], 
+                                y_gap_threshold: int = 10) -> List[List[int]]:
+        """
+        🆕 预处理:将 y 间距很小的组预合并
+        
+        Args:
+            grouped_boxes: 原始分组
+            y_gap_threshold: Y 间距阈值(小于此值认为是同一行)
+        
+        Returns:
+            预处理后的组索引列表 [[0,1], [2], [3,4,5], ...]
+        """
+        if not grouped_boxes:
+            return []
+        
+        preprocessed = []
+        current_group = [0]
+        
+        for i in range(1, len(grouped_boxes)):
+            prev_group = grouped_boxes[i - 1]
+            curr_group = grouped_boxes[i]
+            
+            # 计算间距
+            prev_y_max = max(b['bbox'][3] for b in prev_group['boxes'])
+            curr_y_min = min(b['bbox'][1] for b in curr_group['boxes'])
+            
+            gap = abs(curr_y_min - prev_y_max)
+            
+            if gap <= y_gap_threshold:
+                # 间距很小,合并
+                current_group.append(i)
+                print(f"   预合并: 组 {i-1} 和 {i} (间距: {gap}px)")
+            else:
+                # 间距较大,开始新组
+                preprocessed.append(current_group)
+                current_group = [i]
+        
+        # 添加最后一组
+        if current_group:
+            preprocessed.append(current_group)
+        
+        return preprocessed
+
+    def _match_cell_sequential(self, cell_text: str, 
+                            boxes: List[Dict],
+                            col_boundaries: List[Tuple[int, int]],
+                            start_idx: int) -> Optional[Dict]:
+        """
+        🎯 顺序匹配单元格:从指定位置开始,逐步合并 boxes 直到匹配
+        
+        策略:
+        1. 找到第一个未使用的 box
+        2. 尝试单个 box 精确匹配
+        3. 如果失败,尝试合并多个 boxes
+        
+        Args:
+            cell_text: HTML 单元格文本
+            boxes: 候选 boxes(已按 x 坐标排序)
+            col_boundaries: 列边界列表
+            start_idx: 起始索引
+        
+        Returns:
+            {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float, 
+            'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
+            'last_used_index': int}
+        """
+        from fuzzywuzzy import fuzz
+        
+        cell_text_normalized = self.text_matcher.normalize_text(cell_text)
+        
+        if len(cell_text_normalized) < 2:
+            return None
+        
+        # 🔑 找到第一个未使用的 box
+        first_unused_idx = start_idx
+        while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
+            first_unused_idx += 1
+        
+        if first_unused_idx >= len(boxes):
+            return None
+
+        # 🔑 策略 1: 单个 box 精确匹配
+        for box in boxes[first_unused_idx:]:
+            if box.get('used'):
+                continue
+            
+            box_text = self.text_matcher.normalize_text(box['text'])
+            
+            if cell_text_normalized == box_text:
+                return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
+        
+        # 🔑 策略 2: 多个 boxes 合并匹配
+        unused_boxes = [b for b in boxes if not b.get('used')]
+        # 合并同列的 boxes 合并
+        merged_bboxes = []
+        for col_idx in range(len(col_boundaries)):
+            combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
+            if len(combo_boxes) > 0:
+                sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
+                merged_text = ''.join([b['text'] for b in sorted_combo])
+                merged_bboxes.append({
+                    'text': merged_text,
+                    'sorted_combo': sorted_combo
+                })
+
+        for box in merged_bboxes:
+            # 1. 精确匹配
+            merged_text_normalized = self.text_matcher.normalize_text(box['text'])
+            if cell_text_normalized == merged_text_normalized:
+                last_sort_idx = boxes.index(box['sorted_combo'][-1])
+                return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
+            
+            # 2. 子串匹配
+            is_substring = (cell_text_normalized in merged_text_normalized or 
+                        merged_text_normalized in cell_text_normalized)
+            
+            # 3. 模糊匹配
+            similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
+            
+            # 🎯 子串匹配加分
+            if is_substring:
+                similarity = min(100, similarity + 10)
+            
+            if similarity >= self.text_matcher.similarity_threshold:
+                print(f"         ✓ 匹配成功: '{cell_text[:15]}' vs '{merged_text[:15]}' (相似度: {similarity})")
+                return self._build_match_result(box['sorted_combo'], box['text'], similarity, start_idx)
+        
+        print(f"         ✗ 匹配失败: '{cell_text[:15]}'")
+        return None
+
+
+    def _build_match_result(self, boxes: List[Dict], text: str, 
+                        score: float, last_index: int) -> Dict:
+        """构建匹配结果(使用原始坐标)"""
+    
+        # 🔑 关键修复:使用 original_bbox(如果存在)
+        def get_original_bbox(box: Dict) -> List[int]:
+            return box.get('original_bbox', box['bbox'])
+        
+        original_bboxes = [get_original_bbox(b) for b in boxes]
+        
+        merged_bbox = [
+            min(b[0] for b in original_bboxes),
+            min(b[1] for b in original_bboxes),
+            max(b[2] for b in original_bboxes),
+            max(b[3] for b in original_bboxes)
+        ]
+        
+        return {
+            'bbox': merged_bbox,  # ✅ 使用原始坐标
+            'text': text,
+            'score': score,
+            'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
+            'used_boxes': boxes,
+            'last_used_index': last_index
+        }

+ 906 - 0
merger/table_cell_matcher_v3.py

@@ -0,0 +1,906 @@
+"""
+表格单元格匹配器
+负责将 HTML 表格单元格与 PaddleOCR bbox 进行匹配
+"""
+from typing import List, Dict, Tuple, Optional
+from bs4 import BeautifulSoup
+import numpy as np
+
+try:
+    from .text_matcher import TextMatcher
+    from .bbox_extractor import BBoxExtractor
+except ImportError:
+    from text_matcher import TextMatcher
+    from bbox_extractor import BBoxExtractor
+
+class TableCellMatcher:
+    """表格单元格匹配器"""
+    
+    def __init__(self, text_matcher: TextMatcher, 
+                 x_tolerance: int = 3, 
+                 y_tolerance: int = 10):
+        """
+        Args:
+            text_matcher: 文本匹配器
+            x_tolerance: X轴容差(用于列边界判断)
+            y_tolerance: Y轴容差(用于行分组)
+        """
+        self.text_matcher = text_matcher
+        self.x_tolerance = x_tolerance
+        self.y_tolerance = y_tolerance
+    
    def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                  start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
        """
        Attach bbox info to an HTML table (optimised: pre-filter the table region).

        Strategy:
        1. Use table_bbox to keep only the paddle_text_boxes inside the table region
        2. Group the filtered boxes into rows
        3. Smart-match HTML rows against the paddle row groups
        4. Look up each cell inside its matched group(s)

        Args:
            html: HTML table markup
            paddle_text_boxes: full paddle OCR result list
            start_pointer: index into paddle_text_boxes to start from
            table_bbox: table bounding box [x1, y1, x2, y2], or None

        Returns:
            (enhanced HTML string, list of matched cell dicts, new pointer)
        """
        soup = BeautifulSoup(html, 'html.parser')
        cells = []
        
        # 🔑 Step 1: keep only the paddle boxes inside the table region.
        table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
            paddle_text_boxes[start_pointer:],
            table_bbox,
            html
        )
        
        if not table_region_boxes:
            print(f"⚠️ 未在表格区域找到 paddle boxes")
            return str(soup), cells, start_pointer
        
        print(f"📊 表格区域: {len(table_region_boxes)} 个文本框")
        print(f"   边界: {actual_table_bbox}")
        
        # 🔑 Step 2: group the region's boxes into rows (with skew correction).
        grouped_boxes = self._group_paddle_boxes_by_rows(
            table_region_boxes,
            y_tolerance=self.y_tolerance,
            auto_correct_skew=True
        )
        
        # 🔑 Step 3: sort boxes inside each group by x, then groups by y.
        for group in grouped_boxes:
            group['boxes'].sort(key=lambda x: x['bbox'][0])
        
        grouped_boxes.sort(key=lambda g: g['y_center'])
        
        print(f"   分组: {len(grouped_boxes)} 行")
        
        # 🔑 Step 4: smart-match HTML rows to paddle row groups.
        html_rows = soup.find_all('tr')
        row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
        
        print(f"   HTML行: {len(html_rows)} 行")
        print(f"   映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
        
        # 🔑 Step 5: walk the HTML table using the row mapping.
        for row_idx, row in enumerate(html_rows):
            group_indices = row_mapping.get(row_idx, [])
            
            if not group_indices:
                continue
            
            # Pool the boxes of every group mapped to this HTML row.
            current_boxes = []
            for group_idx in group_indices:
                if group_idx < len(grouped_boxes):
                    current_boxes.extend(grouped_boxes[group_idx]['boxes'])
            
            current_boxes.sort(key=lambda x: x['bbox'][0])
            
            # 🎯 Key improvement: extract the row's cells and pre-compute
            # column boundaries before matching.
            html_cells = row.find_all(['td', 'th'])
            
            if not html_cells:
                continue
            
            # 🔑 Estimate column boundaries from the x distribution.
            col_boundaries = self._estimate_column_boundaries(
                current_boxes, 
                len(html_cells)
            )
            
            print(f"   行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
            
            # 🎯 Key improvement: sequential pointer matching.
            box_pointer = 0  # pointer into current_boxes for this row
            
            for col_idx, cell in enumerate(html_cells):
                cell_text = cell.get_text(strip=True)
                
                if not cell_text:
                    continue
                
                # 🔑 Match starting from the current pointer.
                matched_result = self._match_cell_sequential(
                    cell_text,
                    current_boxes,
                    col_boundaries,
                    box_pointer
                )
                
                if matched_result:
                    merged_bbox = matched_result['bbox']
                    merged_text = matched_result['text']
                    
                    # Record the match as data-* attributes on the cell.
                    cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
                    cell['data-score'] = f"{matched_result['score']:.4f}"
                    cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
                    
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'matched_text': merged_text,
                        'bbox': merged_bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_result['score'],
                        'paddle_bbox_indices': matched_result['paddle_indices']
                    })
                    
                    # Mark consumed boxes so they are not matched twice.
                    for box in matched_result['used_boxes']:
                        box['used'] = True
                    
                    # 🎯 Advance the pointer past the last consumed box.
                    box_pointer = matched_result['last_used_index'] + 1
                    
                    print(f"      列 {col_idx + 1}: '{cell_text[:20]}...' 匹配 {len(matched_result['used_boxes'])} 个box (指针: {box_pointer})")
        
        # New pointer = start + number of consumed boxes.
        # NOTE(review): assumes consumed boxes form a prefix of the original
        # slice order — TODO confirm this holds when rows interleave.
        used_count = sum(1 for box in table_region_boxes if box.get('used'))
        new_pointer = start_pointer + used_count
        
        print(f"   匹配: {len(cells)} 个单元格")
        
        return str(soup), cells, new_pointer
+
+
+    def _estimate_column_boundaries(self, boxes: List[Dict], 
+                                    num_cols: int) -> List[Tuple[int, int]]:
+        """
+        估算列边界(改进版:处理同列多文本框)
+        
+        Args:
+            boxes: 当前行的所有 boxes(已按 x 排序)
+            num_cols: HTML 表格的列数
+        
+        Returns:
+            列边界列表 [(x_start, x_end), ...]
+        """
+        if not boxes:
+            return []
+        
+        # 🔑 关键改进:先按 x 坐标聚类(合并同列的多个文本框)
+        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
+        
+        print(f"      X聚类: {len(boxes)} 个boxes -> {len(x_clusters)} 个列簇")
+        
+        # 获取所有 x 坐标范围
+        x_min = min(cluster['x_min'] for cluster in x_clusters)
+        x_max = max(cluster['x_max'] for cluster in x_clusters)
+        
+        # 🎯 策略 1: 如果聚类数量<=列数接近
+        if len(x_clusters) <= num_cols:
+            # 直接使用聚类边界
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in x_clusters]
+            return boundaries
+        
+        # 🎯 策略 2: 聚类数多于列数(某些列有多个文本簇)
+        if len(x_clusters) > num_cols:
+            print(f"      ℹ️ 聚类数 {len(x_clusters)} > 列数 {num_cols},合并相近簇")
+            
+            # 合并相近的簇
+            merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
+            
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in merged_clusters]
+            return boundaries
+        
+        return []
+
+
+    def _cluster_boxes_by_x(self, boxes: List[Dict], 
+                    x_tolerance: int = 3) -> List[Dict]:
+        """
+        按 x 坐标聚类(合并同列的多个文本框)
+        
+        Args:
+            boxes: 文本框列表
+            x_tolerance: X坐标容忍度
+        
+        Returns:
+            聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
+        """
+        if not boxes:
+            return []
+        
+        # 按左边界 x 坐标排序
+        sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
+        
+        clusters = []
+        current_cluster = None
+        
+        for box in sorted_boxes:
+            bbox = box['bbox']
+            x_start = bbox[0]
+            x_end = bbox[2]
+            
+            if current_cluster is None:
+                # 开始新簇
+                current_cluster = {
+                    'x_min': x_start,
+                    'x_max': x_end,
+                    'boxes': [box]
+                }
+            else:
+                # 🔑 检查是否属于当前簇(修正后的逻辑)
+                # 1. x 坐标有重叠:x_start <= current_x_max 且 x_end >= current_x_min
+                # 2. 或者距离在容忍度内
+            
+                has_overlap = (x_start <= current_cluster['x_max'] and 
+                              x_end >= current_cluster['x_min'])
+            
+                is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
+            
+                if has_overlap or is_close:
+                    # 合并到当前簇
+                    current_cluster['boxes'].append(box)
+                    current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
+                    current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
+                else:
+                    # 保存当前簇,开始新簇
+                    clusters.append(current_cluster)
+                    current_cluster = {
+                        'x_min': x_start,
+                        'x_max': x_end,
+                        'boxes': [box]
+                    }
+    
+        # 添加最后一簇
+        if current_cluster:
+            clusters.append(current_cluster)
+        
+        return clusters
+
+
+    def _merge_close_clusters(self, clusters: List[Dict], 
+                            target_count: int) -> List[Dict]:
+        """
+        合并相近的簇,直到数量等于目标列数
+        
+        Args:
+            clusters: 聚类列表
+            target_count: 目标列数
+        
+        Returns:
+            合并后的聚类列表
+        """
+        if len(clusters) <= target_count:
+            return clusters
+        
+        # 复制一份,避免修改原数据
+        working_clusters = [c.copy() for c in clusters]
+        
+        while len(working_clusters) > target_count:
+            # 找到距离最近的两个簇
+            min_distance = float('inf')
+            merge_idx = 0
+            
+            for i in range(len(working_clusters) - 1):
+                distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
+                if distance < min_distance:
+                    min_distance = distance
+                    merge_idx = i
+            
+            # 合并
+            cluster1 = working_clusters[merge_idx]
+            cluster2 = working_clusters[merge_idx + 1]
+            
+            merged_cluster = {
+                'x_min': cluster1['x_min'],
+                'x_max': cluster2['x_max'],
+                'boxes': cluster1['boxes'] + cluster2['boxes']
+            }
+            
+            # 替换
+            working_clusters[merge_idx] = merged_cluster
+            working_clusters.pop(merge_idx + 1)
+        
+        return working_clusters
+
+
+    def _get_boxes_in_column(self, boxes: List[Dict], 
+                            boundaries: List[Tuple[int, int]],
+                            col_idx: int) -> List[Dict]:
+        """
+        获取指定列范围内的 boxes(改进版:包含重叠)
+        
+        Args:
+            boxes: 当前行的所有 boxes
+            boundaries: 列边界
+            col_idx: 列索引
+        
+        Returns:
+            该列的 boxes
+        """
+        if col_idx >= len(boundaries):
+            return []
+        
+        x_start, x_end = boundaries[col_idx]
+        
+        col_boxes = []
+        for box in boxes:
+            bbox = box['bbox']
+            box_x_start = bbox[0]
+            box_x_end = bbox[2]
+            
+            # 🔑 改进:检查是否有重叠(不只是中心点)
+            overlap = not (box_x_start > x_end or box_x_end < x_start)
+            
+            if overlap:
+                col_boxes.append(box)
+        
+        return col_boxes
+
+
    def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
                                  table_bbox: Optional[List[int]],
                                  html: str) -> Tuple[List[Dict], List[int]]:
        """
        Select the paddle boxes that belong to the table region.
    
        Strategy:
        1. With a table_bbox: filter by the bounding box (expanded by a margin)
        2. Without one: infer the region by matching box texts to the HTML content
    
        Args:
            paddle_boxes: paddle OCR boxes
            table_bbox: table bounding box [x1, y1, x2, y2], or None
            html: HTML content (used for content-based verification)
    
        Returns:
            (filtered boxes, actual table bounding box)
        """
        if not paddle_boxes:
            return [], [0, 0, 0, 0]
        
        # 🎯 Strategy 1: use the provided table_bbox (with expanded borders).
        if table_bbox and len(table_bbox) == 4:
            x1, y1, x2, y2 = table_bbox
            
            # Expand the borders to catch text sitting just outside the frame.
            margin = 20
            expanded_bbox = [
                max(0, x1 - margin),
                max(0, y1 - margin),
                x2 + margin,
                y2 + margin
            ]
            
            filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                # Keep boxes whose centre lies inside the expanded region.
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    filtered.append(box)
            
            if filtered:
                # Tight bounding box of what was actually kept.
                actual_bbox = [
                    min(b['bbox'][0] for b in filtered),
                    min(b['bbox'][1] for b in filtered),
                    max(b['bbox'][2] for b in filtered),
                    max(b['bbox'][3] for b in filtered)
                ]
                return filtered, actual_bbox
        
        # 🎯 Strategy 2: infer the region by content matching.
        print("   ℹ️ 无 table_bbox,使用内容匹配推断表格区域...")
        
        # Collect the normalized text of every HTML cell.
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, 'html.parser')
        html_texts = set()
        for cell in soup.find_all(['td', 'th']):
            text = cell.get_text(strip=True)
            if text:
                html_texts.add(self.text_matcher.normalize_text(text))
        
        if not html_texts:
            return [], [0, 0, 0, 0]
        
        # Find the boxes whose text matches some HTML cell text.
        matched_boxes = []
        for box in paddle_boxes:
            normalized_text = self.text_matcher.normalize_text(box['text'])
            
            # Substring containment in either direction counts as a match.
            if any(normalized_text in ht or ht in normalized_text 
                   for ht in html_texts):
                matched_boxes.append(box)
        
        if not matched_boxes:
            # 🔑 Fallback: exact matching failed, try fuzzy matching.
            print("   ℹ️ 精确匹配失败,尝试模糊匹配...")
            
            from fuzzywuzzy import fuzz
            for box in paddle_boxes:
                normalized_text = self.text_matcher.normalize_text(box['text'])
                
                for ht in html_texts:
                    similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # lowered threshold
                        matched_boxes.append(box)
                        break
    
        if matched_boxes:
            # Bounding box of the matched boxes.
            actual_bbox = [
                min(b['bbox'][0] for b in matched_boxes),
                min(b['bbox'][1] for b in matched_boxes),
                max(b['bbox'][2] for b in matched_boxes),
                max(b['bbox'][3] for b in matched_boxes)
            ]
            
            # 🔑 Expand it to pick up any text we may have missed.
            margin = 30
            expanded_bbox = [
                max(0, actual_bbox[0] - margin),
                max(0, actual_bbox[1] - margin),
                actual_bbox[2] + margin,
                actual_bbox[3] + margin
            ]
            
            # Re-filter so boxes on the border are included as well.
            final_filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    final_filtered.append(box)
            
            return final_filtered, actual_bbox
        
        # 🔑 Last-resort fallback: return every paddle box.
        print("   ⚠️ 无法确定表格区域,使用所有 paddle boxes")
        if paddle_boxes:
            actual_bbox = [
                min(b['bbox'][0] for b in paddle_boxes),
                min(b['bbox'][1] for b in paddle_boxes),
                max(b['bbox'][2] for b in paddle_boxes),
                max(b['bbox'][3] for b in paddle_boxes)
            ]
            return paddle_boxes, actual_bbox
        
        return [], [0, 0, 0, 0]
+
+    def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict], 
+                                    y_tolerance: int = 10,
+                                    auto_correct_skew: bool = True) -> List[Dict]:
+        """
+        将 paddle_text_boxes 按 y 坐标分组(聚类)- 增强版本
+    
+        Args:
+            paddle_boxes: Paddle OCR 文字框列表
+            y_tolerance: Y 坐标容忍度(像素)
+            auto_correct_skew: 是否自动校正倾斜
+    
+        Returns:
+            分组列表,每组包含 {'y_center': float, 'boxes': List[Dict]}
+        """
+        if not paddle_boxes:
+            return []
+        
+        # 🎯 步骤 1: 检测并校正倾斜(使用 BBoxExtractor)
+        if auto_correct_skew:
+            rotation_angle = BBoxExtractor.calculate_skew_angle(paddle_boxes)
+            
+            if abs(rotation_angle) > 0.5:
+                max_x = max(box['bbox'][2] for box in paddle_boxes)
+                max_y = max(box['bbox'][3] for box in paddle_boxes)
+                image_size = (max_x, max_y)
+                
+                print(f"   🔧 校正倾斜角度: {rotation_angle:.2f}°")
+                paddle_boxes = BBoxExtractor.correct_boxes_skew(
+                    paddle_boxes, -rotation_angle, image_size
+                )
+        
+        # 🎯 步骤 2: 按校正后的 y 坐标分组
+        boxes_with_y = []
+        for box in paddle_boxes:
+            bbox = box['bbox']
+            y_center = (bbox[1] + bbox[3]) / 2
+            boxes_with_y.append({
+                'y_center': y_center,
+                'box': box
+            })
+        
+        # 按 y 坐标排序
+        boxes_with_y.sort(key=lambda x: x['y_center'])
+        
+        groups = []
+        current_group = None
+        
+        for item in boxes_with_y:
+            if current_group is None:
+                # 开始新组
+                current_group = {
+                    'y_center': item['y_center'],
+                    'boxes': [item['box']]
+                }
+            else:
+                if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
+                    current_group['boxes'].append(item['box'])
+                    # 更新组的中心
+                    current_group['y_center'] = sum(
+                        (b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
+                    ) / len(current_group['boxes'])
+                else:
+                    groups.append(current_group)
+                    current_group = {
+                        'y_center': item['y_center'],
+                        'boxes': [item['box']]
+                    }
+        
+        if current_group:
+            groups.append(current_group)
+        
+        print(f"   ✓ 分组完成: {len(groups)} 行")
+        
+        return groups
+
+
    def _match_html_rows_to_paddle_groups(self, html_rows: List, 
                                        grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
        """
        Smart-match HTML rows to paddle row groups.

        Optimised version: can skip unrelated groups, and a look-ahead check
        guards the greedy merge against stealing the next row's groups.

        Args:
            html_rows: <tr> elements of the HTML table
            grouped_boxes: paddle boxes grouped into rows (y-ordered)

        Returns:
            Mapping {html_row_index: [paddle_group_index, ...]}
        """
        if not html_rows or not grouped_boxes:
            return {}
        
        mapping = {}
        
        # 🎯 Strategy 1: equal counts — trivial 1:1 mapping.
        if len(html_rows) == len(grouped_boxes):
            for i in range(len(html_rows)):
                mapping[i] = [i]
            return mapping
        
        # 🎯 Strategy 2: content-based monotonic matching with skip support.
        # Concatenate the normalized text of each HTML row.
        html_row_texts = []
        for row in html_rows:
            cells = row.find_all(['td', 'th'])
            texts = [self.text_matcher.normalize_text(c.get_text(strip=True)) for c in cells]
            html_row_texts.append("".join(texts))

        # Helper: concatenated normalized text of one paddle group.
        def get_group_text(idx):
            if 0 <= idx < len(grouped_boxes):
                boxes = grouped_boxes[idx]['boxes']
                # No sorting here; grouped_boxes are assumed X-sorted already.
                texts = [self.text_matcher.normalize_text(b['text']) for b in boxes]
                return "".join(texts)
            return ""

        paddle_idx = 0
        num_paddle = len(grouped_boxes)
        used_groups = set()

        for html_idx, html_row_text in enumerate(html_row_texts):
            # All paddle groups consumed: remaining HTML rows map to nothing.
            if paddle_idx >= num_paddle:
                mapping[html_idx] = []
                continue
            
            # Empty HTML rows cannot be content-matched.
            if not html_row_text:
                mapping[html_idx] = []
                continue

            # --- Step 1: find the best "starting" group within the window ---
            best_score = 0.0
            best_start_idx = -1
            
            # Search window: up to 10 groups ahead of the current pointer.
            search_limit = min(paddle_idx + 10, num_paddle)
            
            for i in range(paddle_idx, search_limit):
                # Single-group similarity score.
                group_text = get_group_text(i)
                score = self._calculate_similarity(html_row_text, group_text)
                
                # Distance penalty (farther groups score lower, so the
                # nearest plausible group wins).
                dist_penalty = (i - paddle_idx) * 0.02
                final_score = score - dist_penalty
                
                if final_score > best_score and final_score > 0.3: # base threshold
                    best_score = final_score
                    best_start_idx = i
            
            if best_start_idx == -1:
                mapping[html_idx] = []
                continue 
            
            # --- Step 2: greedy merge with look-ahead ---
            current_indices = [best_start_idx]
            current_text = get_group_text(best_start_idx)
            current_score = self._calculate_similarity(html_row_text, current_text)
            
            next_probe = best_start_idx + 1
            max_merge = 5 # cap on how many groups one HTML row may absorb
            
            while next_probe < num_paddle and len(current_indices) < max_merge:
                next_group_text = get_group_text(next_probe)
                
                # 🛑 Key improvement: look-ahead check —
                # does the candidate group belong to the NEXT HTML row instead?
                should_stop = False
                if html_idx + 1 < len(html_row_texts):
                    next_html_text = html_row_texts[html_idx + 1]
                    
                    # How well the group fits the current row vs. the next.
                    # (group, html) argument order measures how much of the
                    # group's text the HTML row covers.
                    match_current = self._calculate_similarity(next_group_text, html_row_text)
                    match_next = self._calculate_similarity(next_group_text, next_html_text)
                    
                    # Stop when it clearly fits the next row better (> 0.5).
                    if match_next > match_current and match_next > 0.5:
                        print(f"      🛑 停止合并组 {next_probe}: 更匹配下一行 (Next: {match_next:.2f} > Curr: {match_current:.2f})")
                        should_stop = True
                
                if should_stop:
                    break

                # Try the merge.
                combined_text = current_text + next_group_text
                new_score = self._calculate_similarity(html_row_text, combined_text)
                
                # Accept only when the score actually improves.
                if new_score > current_score + 0.01: 
                    current_indices.append(next_probe)
                    current_text = combined_text
                    current_score = new_score
                    next_probe += 1
                else:
                    break
            
            mapping[html_idx] = current_indices
            used_groups.update(current_indices)
            
            print(f"   ✓ 行 {html_idx}: 匹配组 {current_indices} (得分: {current_score:.2f})")
            
            # Advance the pointer past the consumed groups.
            paddle_idx = current_indices[-1] + 1

        # --- Step 3: attach orphan groups to their nearest matched neighbour ---
        unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
        
        if unused_groups:
            print(f"   ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
            for unused_idx in unused_groups:
                unused_group = grouped_boxes[unused_idx]
                unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
                unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
                
                above_idx = None
                below_idx = None
                above_distance = float('inf')
                below_distance = float('inf')
                
                # Nearest used group above (distance to its bottom-most box).
                for i in range(unused_idx - 1, -1, -1):
                    if i in used_groups:
                        above_idx = i
                        above_group = grouped_boxes[i]
                        max_y_box = max(above_group['boxes'], key=lambda b: b['bbox'][3])
                        above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
                        above_distance = abs(unused_y_min - above_y_center)
                        break
                
                # Nearest used group below (distance to its top-most box).
                for i in range(unused_idx + 1, len(grouped_boxes)):
                    if i in used_groups:
                        below_idx = i
                        below_group = grouped_boxes[i]
                        min_y_box = min(below_group['boxes'], key=lambda b: b['bbox'][1])
                        below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
                        below_distance = abs(below_y_center - unused_y_max)
                        break
                
                # Pick whichever neighbour is vertically closer.
                closest_used_idx = None
                merge_direction = ""
                
                if above_idx is not None and below_idx is not None:
                    if above_distance < below_distance:
                        closest_used_idx = above_idx
                        merge_direction = "上方"
                    else:
                        closest_used_idx = below_idx
                        merge_direction = "下方"
                elif above_idx is not None:
                    closest_used_idx = above_idx
                    merge_direction = "上方"
                elif below_idx is not None:
                    closest_used_idx = below_idx
                    merge_direction = "下方"
                
                if closest_used_idx is not None:
                    # Find the HTML row the chosen neighbour belongs to.
                    target_html_row = None
                    for html_row_idx, group_indices in mapping.items():
                        if closest_used_idx in group_indices:
                            target_html_row = html_row_idx
                            break
                    
                    if target_html_row is not None:
                        if unused_idx not in mapping[target_html_row]:
                            mapping[target_html_row].append(unused_idx)
                            mapping[target_html_row].sort()
                            print(f"      • 组 {unused_idx} 合并到 HTML 行 {target_html_row}({merge_direction}行)")                
                used_groups.add(unused_idx)
        
        # 🔑 Strategy 4 / final pass: sort each row's group indices by y centre.
        for row_idx in mapping:
            if mapping[row_idx]:
                mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
        
        return mapping
+
+    def _calculate_similarity(self, text1: str, text2: str) -> float:
+        """
+        计算两个文本的相似度,结合字符覆盖率和序列相似度
+        """
+        if not text1 or not text2:
+            return 0.0
+            
+        # 1. 字符覆盖率 (Character Overlap) - 解决乱序/交错问题
+        from collections import Counter
+        c1 = Counter(text1)
+        c2 = Counter(text2)
+        
+        # 计算交集字符数
+        intersection = c1 & c2
+        overlap_count = sum(intersection.values())
+        
+        # 覆盖率:paddle 文本中有多少是 html 文本需要的
+        coverage = overlap_count / len(text1) if len(text1) > 0 else 0
+        
+        # 2. 序列相似度 (Sequence Similarity) - 解决完全不相关但字符相似的问题
+        from fuzzywuzzy import fuzz
+        # 使用 token_sort_ratio 来容忍一定的乱序
+        seq_score = fuzz.token_sort_ratio(text1, text2) / 100.0
+        
+        # 综合评分:侧重覆盖率,因为对于 OCR 结果合并,内容完整性比顺序更重要
+        return (coverage * 0.7) + (seq_score * 0.3)
+
+
+    def _match_cell_sequential(self, cell_text: str, 
+                            boxes: List[Dict],
+                            col_boundaries: List[Tuple[int, int]],
+                            start_idx: int) -> Optional[Dict]:
+        """
+        🎯 顺序匹配单元格:从指定位置开始,逐步合并 boxes 直到匹配
+        
+        策略:
+        1. 找到第一个未使用的 box
+        2. 尝试单个 box 精确匹配
+        3. 如果失败,尝试合并多个 boxes
+        
+        Args:
+            cell_text: HTML 单元格文本
+            boxes: 候选 boxes(已按 x 坐标排序)
+            col_boundaries: 列边界列表
+            start_idx: 起始索引
+        
+        Returns:
+            {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float, 
+            'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
+            'last_used_index': int}
+        """
+        from fuzzywuzzy import fuzz
+        
+        cell_text_normalized = self.text_matcher.normalize_text(cell_text)
+        
+        if len(cell_text_normalized) < 2:
+            return None
+        
+        # 🔑 找到第一个未使用的 box
+        first_unused_idx = start_idx
+        while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
+            first_unused_idx += 1
+        
+        if first_unused_idx >= len(boxes):
+            return None
+
+        # 🔑 策略 1: 单个 box 精确匹配
+        for box in boxes[first_unused_idx:]:
+            if box.get('used'):
+                continue
+            
+            box_text = self.text_matcher.normalize_text(box['text'])
+            
+            if cell_text_normalized == box_text:
+                return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
+        
+        # 🔑 策略 2: 多个 boxes 合并匹配
+        unused_boxes = [b for b in boxes if not b.get('used')]
+        # 合并同列的 boxes 合并
+        merged_bboxes = []
+        for col_idx in range(len(col_boundaries)):
+            combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
+            if len(combo_boxes) > 0:
+                sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
+                merged_text = ''.join([b['text'] for b in sorted_combo])
+                merged_bboxes.append({
+                    'text': merged_text,
+                    'sorted_combo': sorted_combo
+                })
+
+        for box in merged_bboxes:
+            # 1. 精确匹配
+            merged_text_normalized = self.text_matcher.normalize_text(box['text'])
+            if cell_text_normalized == merged_text_normalized:
+                last_sort_idx = boxes.index(box['sorted_combo'][-1])
+                return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
+            
+            # 2. 子串匹配
+            is_substring = (cell_text_normalized in merged_text_normalized or 
+                        merged_text_normalized in cell_text_normalized)
+            
+            # 3. 模糊匹配
+            similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
+            
+            # 🎯 子串匹配加分
+            if is_substring:
+                similarity = min(100, similarity + 10)
+            
+            if similarity >= self.text_matcher.similarity_threshold:
+                print(f"         ✓ 匹配成功: '{cell_text[:15]}' vs '{merged_text[:15]}' (相似度: {similarity})")
+                return self._build_match_result(box['sorted_combo'], box['text'], similarity, start_idx)
+        
+        print(f"         ✗ 匹配失败: '{cell_text[:15]}'")
+        return None
+
+
+    def _build_match_result(self, boxes: List[Dict], text: str, 
+                        score: float, last_index: int) -> Dict:
+        """构建匹配结果(使用原始坐标)"""
+    
+        # 🔑 关键修复:使用 original_bbox(如果存在)
+        def get_original_bbox(box: Dict) -> List[int]:
+            return box.get('original_bbox', box['bbox'])
+        
+        original_bboxes = [get_original_bbox(b) for b in boxes]
+        
+        merged_bbox = [
+            min(b[0] for b in original_bboxes),
+            min(b[1] for b in original_bboxes),
+            max(b[2] for b in original_bboxes),
+            max(b[3] for b in original_bboxes)
+        ]
+        
+        return {
+            'bbox': merged_bbox,  # ✅ 使用原始坐标
+            'text': text,
+            'score': score,
+            'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
+            'used_boxes': boxes,
+            'last_used_index': last_index
+        }

+ 953 - 0
merger/table_cell_matcher_v4.py

@@ -0,0 +1,953 @@
+"""
+表格单元格匹配器
+负责将 HTML 表格单元格与 PaddleOCR bbox 进行匹配
+"""
+from typing import List, Dict, Tuple, Optional
+from bs4 import BeautifulSoup
+import numpy as np
+
+try:
+    from rapidfuzz import fuzz
+except ImportError:
+    from fuzzywuzzy import fuzz
+
+try:
+    from .text_matcher import TextMatcher
+    from .bbox_extractor import BBoxExtractor
+except ImportError:
+    from text_matcher import TextMatcher
+    from bbox_extractor import BBoxExtractor
+
+class TableCellMatcher:
+    """表格单元格匹配器"""
+    
+    def __init__(self, text_matcher: TextMatcher, 
+                 x_tolerance: int = 3, 
+                 y_tolerance: int = 10):
+        """
+        Args:
+            text_matcher: 文本匹配器
+            x_tolerance: X轴容差(用于列边界判断)
+            y_tolerance: Y轴容差(用于行分组)
+        """
+        self.text_matcher = text_matcher
+        self.x_tolerance = x_tolerance
+        self.y_tolerance = y_tolerance
+    
    def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                  start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
        """
        Attach bbox metadata to the cells of an HTML table (optimized
        version: candidate boxes are pre-filtered to the table region).

        Strategy:
        1. Filter paddle_text_boxes down to the table region (table_bbox).
        2. Group the filtered boxes into visual rows.
        3. Map HTML rows onto the paddle row groups.
        4. Match each cell sequentially within its mapped group(s).

        Args:
            html: HTML of one table
            paddle_text_boxes: full paddle OCR result list
            start_pointer: index into paddle_text_boxes where scanning starts
            table_bbox: table bounding box [x1, y1, x2, y2], optional

        Returns:
            (enhanced HTML string, list of matched cell records, new pointer)
        """
        soup = BeautifulSoup(html, 'html.parser')
        cells = []
        
        # Step 1: keep only the OCR boxes that fall inside the table region.
        table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
            paddle_text_boxes[start_pointer:],
            table_bbox,
            html
        )
        
        if not table_region_boxes:
            print(f"⚠️ 未在表格区域找到 paddle boxes")
            return str(soup), cells, start_pointer
        
        print(f"📊 表格区域: {len(table_region_boxes)} 个文本框")
        print(f"   边界: {actual_table_bbox}")
        
        # Step 2: cluster the region's boxes into visual rows (the helper
        # optionally de-skews the coordinates first).
        grouped_boxes = self._group_paddle_boxes_by_rows(
            table_region_boxes,
            y_tolerance=self.y_tolerance,
            auto_correct_skew=True
        )
        
        # Step 3: left-to-right order within each row, top-to-bottom overall.
        for group in grouped_boxes:
            group['boxes'].sort(key=lambda x: x['bbox'][0])
        
        grouped_boxes.sort(key=lambda g: g['y_center'])
        
        print(f"   分组: {len(grouped_boxes)} 行")
        
        # Step 4: align HTML <tr> rows with the paddle row groups.
        html_rows = soup.find_all('tr')
        row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
        
        print(f"   HTML行: {len(html_rows)} 行")
        print(f"   映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
        
        # Step 5: walk the HTML table, matching cells via the row mapping.
        for row_idx, row in enumerate(html_rows):
            group_indices = row_mapping.get(row_idx, [])
            
            if not group_indices:
                continue
            
            # One HTML row may span several paddle groups; pool their boxes.
            current_boxes = []
            for group_idx in group_indices:
                if group_idx < len(grouped_boxes):
                    current_boxes.extend(grouped_boxes[group_idx]['boxes'])
            
            current_boxes.sort(key=lambda x: x['bbox'][0])
            
            # Extract the HTML cells of this row.
            html_cells = row.find_all(['td', 'th'])
            
            if not html_cells:
                continue
            
            # Pre-compute column boundaries from the x distribution of boxes.
            col_boundaries = self._estimate_column_boundaries(
                current_boxes, 
                len(html_cells)
            )
            
            print(f"   行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
            
            # Sequential matching: cells consume boxes strictly left-to-right.
            box_pointer = 0  # index of the next unconsumed box in this row
            
            for col_idx, cell in enumerate(html_cells):
                cell_text = cell.get_text(strip=True)
                
                if not cell_text:
                    continue
                
                # Match starting from the current pointer position.
                matched_result = self._match_cell_sequential(
                    cell_text,
                    current_boxes,
                    col_boundaries,
                    box_pointer
                )
                
                if matched_result:
                    merged_bbox = matched_result['bbox']
                    merged_text = matched_result['text']
                    
                    # Record the match directly on the HTML element.
                    cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
                    cell['data-score'] = f"{matched_result['score']:.4f}"
                    cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
                    
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'matched_text': merged_text,
                        'bbox': merged_bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_result['score'],
                        'paddle_bbox_indices': matched_result['paddle_indices']
                    })
                    
                    # Mark consumed boxes so later cells cannot reuse them.
                    for box in matched_result['used_boxes']:
                        box['used'] = True
                    
                    # Advance the pointer past the last consumed box.
                    box_pointer = matched_result['last_used_index'] + 1
                    
                    print(f"      列 {col_idx + 1}: '{cell_text[:20]}...' 匹配 {len(matched_result['used_boxes'])} 个box (指针: {box_pointer})")
        
        # New pointer: skip every box this table consumed.
        used_count = sum(1 for box in table_region_boxes if box.get('used'))
        new_pointer = start_pointer + used_count
        
        print(f"   匹配: {len(cells)} 个单元格")
        
        return str(soup), cells, new_pointer
+
+
+    def _estimate_column_boundaries(self, boxes: List[Dict], 
+                                    num_cols: int) -> List[Tuple[int, int]]:
+        """
+        估算列边界(改进版:处理同列多文本框)
+        
+        Args:
+            boxes: 当前行的所有 boxes(已按 x 排序)
+            num_cols: HTML 表格的列数
+        
+        Returns:
+            列边界列表 [(x_start, x_end), ...]
+        """
+        if not boxes:
+            return []
+        
+        # 🔑 关键改进:先按 x 坐标聚类(合并同列的多个文本框)
+        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
+        
+        print(f"      X聚类: {len(boxes)} 个boxes -> {len(x_clusters)} 个列簇")
+        
+        # 获取所有 x 坐标范围
+        x_min = min(cluster['x_min'] for cluster in x_clusters)
+        x_max = max(cluster['x_max'] for cluster in x_clusters)
+        
+        # 🎯 策略 1: 如果聚类数量<=列数接近
+        if len(x_clusters) <= num_cols:
+            # 直接使用聚类边界
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in x_clusters]
+            return boundaries
+        
+        # 🎯 策略 2: 聚类数多于列数(某些列有多个文本簇)
+        if len(x_clusters) > num_cols:
+            print(f"      ℹ️ 聚类数 {len(x_clusters)} > 列数 {num_cols},合并相近簇")
+            
+            # 合并相近的簇
+            merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
+            
+            boundaries = [(cluster['x_min'], cluster['x_max']) 
+                        for cluster in merged_clusters]
+            return boundaries
+        
+        return []
+
+
+    def _cluster_boxes_by_x(self, boxes: List[Dict], 
+                    x_tolerance: int = 3) -> List[Dict]:
+        """
+        按 x 坐标聚类(合并同列的多个文本框)
+        
+        Args:
+            boxes: 文本框列表
+            x_tolerance: X坐标容忍度
+        
+        Returns:
+            聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
+        """
+        if not boxes:
+            return []
+        
+        # 按左边界 x 坐标排序
+        sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
+        
+        clusters = []
+        current_cluster = None
+        
+        for box in sorted_boxes:
+            bbox = box['bbox']
+            x_start = bbox[0]
+            x_end = bbox[2]
+            
+            if current_cluster is None:
+                # 开始新簇
+                current_cluster = {
+                    'x_min': x_start,
+                    'x_max': x_end,
+                    'boxes': [box]
+                }
+            else:
+                # 🔑 检查是否属于当前簇(修正后的逻辑)
+                # 1. x 坐标有重叠:x_start <= current_x_max 且 x_end >= current_x_min
+                # 2. 或者距离在容忍度内
+            
+                has_overlap = (x_start <= current_cluster['x_max'] and 
+                              x_end >= current_cluster['x_min'])
+            
+                is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
+            
+                if has_overlap or is_close:
+                    # 合并到当前簇
+                    current_cluster['boxes'].append(box)
+                    current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
+                    current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
+                else:
+                    # 保存当前簇,开始新簇
+                    clusters.append(current_cluster)
+                    current_cluster = {
+                        'x_min': x_start,
+                        'x_max': x_end,
+                        'boxes': [box]
+                    }
+    
+        # 添加最后一簇
+        if current_cluster:
+            clusters.append(current_cluster)
+        
+        return clusters
+
+
+    def _merge_close_clusters(self, clusters: List[Dict], 
+                            target_count: int) -> List[Dict]:
+        """
+        合并相近的簇,直到数量等于目标列数
+        
+        Args:
+            clusters: 聚类列表
+            target_count: 目标列数
+        
+        Returns:
+            合并后的聚类列表
+        """
+        if len(clusters) <= target_count:
+            return clusters
+        
+        # 复制一份,避免修改原数据
+        working_clusters = [c.copy() for c in clusters]
+        
+        while len(working_clusters) > target_count:
+            # 找到距离最近的两个簇
+            min_distance = float('inf')
+            merge_idx = 0
+            
+            for i in range(len(working_clusters) - 1):
+                distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
+                if distance < min_distance:
+                    min_distance = distance
+                    merge_idx = i
+            
+            # 合并
+            cluster1 = working_clusters[merge_idx]
+            cluster2 = working_clusters[merge_idx + 1]
+            
+            merged_cluster = {
+                'x_min': cluster1['x_min'],
+                'x_max': cluster2['x_max'],
+                'boxes': cluster1['boxes'] + cluster2['boxes']
+            }
+            
+            # 替换
+            working_clusters[merge_idx] = merged_cluster
+            working_clusters.pop(merge_idx + 1)
+        
+        return working_clusters
+
+
+    def _get_boxes_in_column(self, boxes: List[Dict], 
+                            boundaries: List[Tuple[int, int]],
+                            col_idx: int) -> List[Dict]:
+        """
+        获取指定列范围内的 boxes(改进版:包含重叠)
+        
+        Args:
+            boxes: 当前行的所有 boxes
+            boundaries: 列边界
+            col_idx: 列索引
+        
+        Returns:
+            该列的 boxes
+        """
+        if col_idx >= len(boundaries):
+            return []
+        
+        x_start, x_end = boundaries[col_idx]
+        
+        col_boxes = []
+        for box in boxes:
+            bbox = box['bbox']
+            box_x_start = bbox[0]
+            box_x_end = bbox[2]
+            
+            # 🔑 改进:检查是否有重叠(不只是中心点)
+            overlap = not (box_x_start > x_end or box_x_end < x_start)
+            
+            if overlap:
+                col_boxes.append(box)
+        
+        return col_boxes
+
+
    def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
                                  table_bbox: Optional[List[int]],
                                  html: str) -> Tuple[List[Dict], List[int]]:
        """
        Select the paddle boxes that belong to the table region.
    
        Strategy:
        1. With a table_bbox: keep boxes whose center lies inside the
           (slightly expanded) bounding box.
        2. Without one: infer the region by matching OCR text against the
           HTML cell texts (exact containment first, then fuzzy fallback).
        3. Last resort: return all boxes.
    
        Args:
            paddle_boxes: paddle OCR results
            table_bbox: table bounding box [x1, y1, x2, y2], or None
            html: table HTML (used for content-based inference)
    
        Returns:
            (filtered boxes, tight bounding box of the kept boxes)
        """
        if not paddle_boxes:
            return [], [0, 0, 0, 0]
        
        # Strategy 1: use the provided table_bbox, expanded by a margin so
        # text just outside the drawn border is still captured.
        if table_bbox and len(table_bbox) == 4:
            x1, y1, x2, y2 = table_bbox
            
            # Expansion margin in pixels.
            margin = 20
            expanded_bbox = [
                max(0, x1 - margin),
                max(0, y1 - margin),
                x2 + margin,
                y2 + margin
            ]
            
            filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                # Keep boxes whose center falls inside the expanded region.
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    filtered.append(box)
            
            if filtered:
                # Tight bounding box of the boxes actually kept.
                actual_bbox = [
                    min(b['bbox'][0] for b in filtered),
                    min(b['bbox'][1] for b in filtered),
                    max(b['bbox'][2] for b in filtered),
                    max(b['bbox'][3] for b in filtered)
                ]
                return filtered, actual_bbox
        
        # Strategy 2: no usable table_bbox — infer the region from content.
        print("   ℹ️ 无 table_bbox,使用内容匹配推断表格区域...")
        
        # Collect the normalized text of every HTML cell.
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, 'html.parser')
        html_texts = set()
        for cell in soup.find_all(['td', 'th']):
            text = cell.get_text(strip=True)
            if text:
                html_texts.add(self.text_matcher.normalize_text(text))
        
        if not html_texts:
            return [], [0, 0, 0, 0]
        
        # Exact containment match between OCR text and any cell text.
        matched_boxes = []
        for box in paddle_boxes:
            normalized_text = self.text_matcher.normalize_text(box['text'])
            
            # Either string containing the other counts as a match.
            if any(normalized_text in ht or ht in normalized_text 
                   for ht in html_texts):
                matched_boxes.append(box)
        
        if not matched_boxes:
            # Fallback: exact containment failed — try fuzzy matching.
            print("   ℹ️ 精确匹配失败,尝试模糊匹配...")
            
            for box in paddle_boxes:
                normalized_text = self.text_matcher.normalize_text(box['text'])
                
                for ht in html_texts:
                    similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # relaxed threshold for the fallback
                        matched_boxes.append(box)
                        break
    
        if matched_boxes:
            # Bounding box of the content-matched boxes.
            actual_bbox = [
                min(b['bbox'][0] for b in matched_boxes),
                min(b['bbox'][1] for b in matched_boxes),
                max(b['bbox'][2] for b in matched_boxes),
                max(b['bbox'][3] for b in matched_boxes)
            ]
            
            # Expand the inferred region so borderline text is not lost.
            margin = 30
            expanded_bbox = [
                max(0, actual_bbox[0] - margin),
                max(0, actual_bbox[1] - margin),
                actual_bbox[2] + margin,
                actual_bbox[3] + margin
            ]
            
            # Re-filter against the expanded region (catches boxes that did
            # not text-match but sit inside the table).
            final_filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                    expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    final_filtered.append(box)
            
            return final_filtered, actual_bbox
        
        # Last resort: region cannot be determined — hand back everything.
        print("   ⚠️ 无法确定表格区域,使用所有 paddle boxes")
        if paddle_boxes:
            actual_bbox = [
                min(b['bbox'][0] for b in paddle_boxes),
                min(b['bbox'][1] for b in paddle_boxes),
                max(b['bbox'][2] for b in paddle_boxes),
                max(b['bbox'][3] for b in paddle_boxes)
            ]
            return paddle_boxes, actual_bbox
        
        return [], [0, 0, 0, 0]
+
+    def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict], 
+                                    y_tolerance: int = 10,
+                                    auto_correct_skew: bool = True) -> List[Dict]:
+        """
+        将 paddle_text_boxes 按 y 坐标分组(聚类)- 增强版本
+    
+        Args:
+            paddle_boxes: Paddle OCR 文字框列表
+            y_tolerance: Y 坐标容忍度(像素)
+            auto_correct_skew: 是否自动校正倾斜
+    
+        Returns:
+            分组列表,每组包含 {'y_center': float, 'boxes': List[Dict]}
+        """
+        if not paddle_boxes:
+            return []
+        
+        # 🎯 步骤 1: 检测并校正倾斜(使用 BBoxExtractor)
+        if auto_correct_skew:
+            rotation_angle = BBoxExtractor.calculate_skew_angle(paddle_boxes)
+            
+            if abs(rotation_angle) > 0.5:
+                max_x = max(box['bbox'][2] for box in paddle_boxes)
+                max_y = max(box['bbox'][3] for box in paddle_boxes)
+                image_size = (max_x, max_y)
+                
+                print(f"   🔧 校正倾斜角度: {rotation_angle:.2f}°")
+                paddle_boxes = BBoxExtractor.correct_boxes_skew(
+                    paddle_boxes, -rotation_angle, image_size
+                )
+        
+        # 🎯 步骤 2: 按校正后的 y 坐标分组
+        boxes_with_y = []
+        for box in paddle_boxes:
+            bbox = box['bbox']
+            y_center = (bbox[1] + bbox[3]) / 2
+            boxes_with_y.append({
+                'y_center': y_center,
+                'box': box
+            })
+        
+        # 按 y 坐标排序
+        boxes_with_y.sort(key=lambda x: x['y_center'])
+        
+        groups = []
+        current_group = None
+        
+        for item in boxes_with_y:
+            if current_group is None:
+                # 开始新组
+                current_group = {
+                    'y_center': item['y_center'],
+                    'boxes': [item['box']]
+                }
+            else:
+                if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
+                    current_group['boxes'].append(item['box'])
+                    # 更新组的中心
+                    current_group['y_center'] = sum(
+                        (b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
+                    ) / len(current_group['boxes'])
+                else:
+                    groups.append(current_group)
+                    current_group = {
+                        'y_center': item['y_center'],
+                        'boxes': [item['box']]
+                    }
+        
+        if current_group:
+            groups.append(current_group)
+        
+        print(f"   ✓ 分组完成: {len(groups)} 行")
+        
+        return groups
+
+
+    def _match_html_rows_to_paddle_groups(self, html_rows: List, 
+                                        grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
+        """
+        智能匹配 HTML 行与 paddle 分组(优化版:支持跳过无关组 + 防贪婪)
+        """
+        if not html_rows or not grouped_boxes:
+            return {}
+        
+        mapping = {}
+        
+        # 🎯 策略 1: 数量相等,简单 1:1 映射
+        if len(html_rows) == len(grouped_boxes):
+            for i in range(len(html_rows)):
+                mapping[i] = [i]
+            return mapping
+        
+        # --- 准备数据 ---
+        # 提取 HTML 文本
+        html_row_texts = []
+        for row in html_rows:
+            cells = row.find_all(['td', 'th'])
+            texts = [self.text_matcher.normalize_text(c.get_text(strip=True)) for c in cells]
+            html_row_texts.append("".join(texts))
+
+        # 预计算所有组的文本
+        group_texts = []
+        for group in grouped_boxes:
+            boxes = group['boxes']
+            texts = [self.text_matcher.normalize_text(b['text']) for b in boxes]
+            group_texts.append("".join(texts))
+
+        n_html = len(html_row_texts)
+        n_paddle = len(grouped_boxes)
+
+        # --- 动态规划 (DP) ---
+        # dp[i][j] 表示:HTML 前 i 行 (0..i) 匹配到了 Paddle 的前 j 组 (0..j,且第 j 组被第 i 行使用了) 的最大得分
+        # 初始化为负无穷
+        dp = np.full((n_html, n_paddle), -np.inf)
+        # 记录路径:path[i][j] = (prev_j, start_j) 
+        # prev_j: 上一行结束的 paddle index
+        # start_j: 当前行开始的 paddle index (因为一行可能对应多个组)
+        path = {} 
+
+        # 参数配置
+        MAX_MERGE = 4       # 一行 HTML 最多合并多少个 Paddle 组
+        SEARCH_WINDOW = 15  # 向前搜索窗口
+        SKIP_PENALTY = 0.1  # 跳过一个 Paddle 组的惩罚
+        
+        # --- 1. 初始化第一行 (HTML Row 0) ---
+        # 第一行可以匹配 Paddle 的第 0 到 SEARCH_WINDOW 组开始的序列
+        for end_j in range(min(n_paddle, SEARCH_WINDOW + MAX_MERGE)):
+            for start_j in range(max(0, end_j - MAX_MERGE + 1), end_j + 1):
+                # 计算当前合并组的文本
+                current_text = "".join(group_texts[start_j : end_j + 1])
+                similarity = self._calculate_similarity(html_row_texts[0], current_text)
+                
+                # 惩罚:跳过了 start_j 之前的组
+                penalty = start_j * SKIP_PENALTY
+                
+                score = similarity - penalty
+                
+                # 只有得分尚可才作为有效状态
+                if score > 0.1:
+                    if score > dp[0][end_j]:
+                        dp[0][end_j] = score
+                        path[(0, end_j)] = (-1, start_j)
+
+        # --- 2. 状态转移 (HTML Row 1 to N) ---
+        for i in range(1, n_html):
+            html_text = html_row_texts[i]
+            if not html_text: # 空行处理
+                # 延续上一行的最佳状态,不消耗 paddle 组
+                for j in range(n_paddle):
+                    if dp[i-1][j] > -np.inf:
+                        dp[i][j] = dp[i-1][j]
+                        path[(i, j)] = (j, j + 1) # start_j = j+1 表示没用新组
+                continue
+
+            # 遍历上一行的结束位置 prev_j
+            # 优化:只遍历有有效分数的 prev_j
+            valid_prev_indices = [j for j in range(n_paddle) if dp[i-1][j] > -np.inf]
+            
+            for prev_j in valid_prev_indices:
+                # 当前行从 prev_j + 1 开始匹配
+                # 允许跳过一些组 (gap),但不能太多
+                for gap in range(SEARCH_WINDOW):
+                    start_j = prev_j + 1 + gap
+                    if start_j >= n_paddle:
+                        break
+                    
+                    current_text = ""
+                    # 尝试合并 1 到 MAX_MERGE 个组
+                    for k in range(MAX_MERGE):
+                        end_j = start_j + k
+                        if end_j >= n_paddle:
+                            break
+                        
+                        current_text += group_texts[end_j]
+                        
+                        # 计算相似度
+                        similarity = self._calculate_similarity(html_text, current_text)
+                        
+                        # 计算惩罚
+                        # 1. 跳过惩罚 (gap)
+                        # 2. 长度惩罚 (防止过度合并)
+                        len_penalty = 0.0
+                        if len(html_text) > 0:
+                            ratio = len(current_text) / len(html_text)
+                            if ratio > 2.0: len_penalty = (ratio - 2.0) * 0.2
+
+                        current_score = similarity - (gap * SKIP_PENALTY) - len_penalty
+                        
+                        # 只有正收益才转移
+                        if current_score > 0.1:
+                            total_score = dp[i-1][prev_j] + current_score
+                            
+                            if total_score > dp[i][end_j]:
+                                dp[i][end_j] = total_score
+                                path[(i, end_j)] = (prev_j, start_j)
+
+        # --- 3. 回溯找最优路径 ---
+        # 找到最后一行得分最高的结束位置
+        best_end_j = -1
+        max_score = -np.inf
+        
+        # 优先找最后一行,如果最后一行没匹配上,往前找
+        found_end = False
+        for i in range(n_html - 1, -1, -1):
+            for j in range(n_paddle):
+                if dp[i][j] > max_score:
+                    max_score = dp[i][j]
+                    best_end_j = j
+                    best_last_row = i
+            if max_score > -np.inf:
+                found_end = True
+                break
+        
+        mapping = {}
+        used_groups = set()
+        
+        if found_end:
+            curr_i = best_last_row
+            curr_j = best_end_j
+            
+            while curr_i >= 0:
+                if (curr_i, curr_j) in path:
+                    prev_j, start_j = path[(curr_i, curr_j)]
+                    
+                    # 记录当前行的匹配 (start_j 到 curr_j)
+                    # 注意:如果 start_j > curr_j,说明是空行或者没匹配到新组
+                    if start_j <= curr_j:
+                        indices = list(range(start_j, curr_j + 1))
+                        mapping[curr_i] = indices
+                        used_groups.update(indices)
+                    else:
+                        mapping[curr_i] = []
+                    
+                    curr_j = prev_j
+                    curr_i -= 1
+                else:
+                    break
+        
+        # 填补未匹配的行
+        for i in range(n_html):
+            if i not in mapping:
+                mapping[i] = []
+
+        # --- 4. 后处理:未匹配组的归属 (Orphans) ---
+        unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
+        
+        if unused_groups:
+            print(f"   ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
+            for unused_idx in unused_groups:
+                unused_group = grouped_boxes[unused_idx]
+                unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
+                unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
+                
+                above_idx = None
+                below_idx = None
+                above_distance = float('inf')
+                below_distance = float('inf')
+                
+                for i in range(unused_idx - 1, -1, -1):
+                    if i in used_groups:
+                        above_idx = i
+                        above_group = grouped_boxes[i]
+                        max_y_box = max(above_group['boxes'], key=lambda b: b['bbox'][3])
+                        above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
+                        above_distance = abs(unused_y_min - above_y_center)
+                        break
+                
+                for i in range(unused_idx + 1, len(grouped_boxes)):
+                    if i in used_groups:
+                        below_idx = i
+                        below_group = grouped_boxes[i]
+                        min_y_box = min(below_group['boxes'], key=lambda b: b['bbox'][1])
+                        below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
+                        below_distance = abs(below_y_center - unused_y_max)
+                        break
+                
+                closest_used_idx = None
+                merge_direction = ""
+                
+                if above_idx is not None and below_idx is not None:
+                    if above_distance < below_distance:
+                        closest_used_idx = above_idx
+                        merge_direction = "上方"
+                    else:
+                        closest_used_idx = below_idx
+                        merge_direction = "下方"
+                elif above_idx is not None:
+                    closest_used_idx = above_idx
+                    merge_direction = "上方"
+                elif below_idx is not None:
+                    closest_used_idx = below_idx
+                    merge_direction = "下方"
+                
+                if closest_used_idx is not None:
+                    target_html_row = None
+                    for html_row_idx, group_indices in mapping.items():
+                        if closest_used_idx in group_indices:
+                            target_html_row = html_row_idx
+                            break
+                    
+                    if target_html_row is not None:
+                        if unused_idx not in mapping[target_html_row]:
+                            mapping[target_html_row].append(unused_idx)
+                            mapping[target_html_row].sort()
+                            print(f"      • 组 {unused_idx} 合并到 HTML 行 {target_html_row}({merge_direction}行)")                
+                used_groups.add(unused_idx)
+        
+        # 🔑 策略 4: 第三遍 - 按 y 坐标排序每行的组索引
+        for row_idx in mapping:
+            if mapping[row_idx]:
+                mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
+        
+        return mapping
+
+    def _calculate_similarity(self, text1: str, text2: str) -> float:
+        """
+        计算两个文本的相似度,结合字符覆盖率和序列相似度
+        """
+        if not text1 or not text2:
+            return 0.0
+            
+        # 1. 字符覆盖率 (Character Overlap) - 解决乱序/交错问题
+        from collections import Counter
+        c1 = Counter(text1)
+        c2 = Counter(text2)
+        
+        # 计算交集字符数
+        intersection = c1 & c2
+        overlap_count = sum(intersection.values())
+        
+        # 覆盖率:paddle 文本中有多少是 html 文本需要的
+        coverage = overlap_count / len(text1) if len(text1) > 0 else 0
+        
+        # 2. 序列相似度 (Sequence Similarity) - 解决完全不相关但字符相似的问题
+        # 使用 token_sort_ratio 来容忍一定的乱序
+        seq_score = fuzz.token_sort_ratio(text1, text2) / 100.0
+        
+        # 综合评分:侧重覆盖率,因为对于 OCR 结果合并,内容完整性比顺序更重要
+        return (coverage * 0.7) + (seq_score * 0.3)
+
+
+    def _match_cell_sequential(self, cell_text: str, 
+                            boxes: List[Dict],
+                            col_boundaries: List[Tuple[int, int]],
+                            start_idx: int) -> Optional[Dict]:
+        """
+        🎯 顺序匹配单元格:从指定位置开始,逐步合并 boxes 直到匹配
+        
+        策略:
+        1. 找到第一个未使用的 box
+        2. 尝试单个 box 精确匹配
+        3. 如果失败,尝试合并多个 boxes
+        
+        Args:
+            cell_text: HTML 单元格文本
+            boxes: 候选 boxes(已按 x 坐标排序)
+            col_boundaries: 列边界列表
+            start_idx: 起始索引
+        
+        Returns:
+            {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float, 
+            'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
+            'last_used_index': int}
+        """
+        cell_text_normalized = self.text_matcher.normalize_text(cell_text)
+        
+        if len(cell_text_normalized) < 2:
+            return None
+        
+        # 🔑 找到第一个未使用的 box
+        first_unused_idx = start_idx
+        while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
+            first_unused_idx += 1
+        
+        if first_unused_idx >= len(boxes):
+            return None
+
+        # 🔑 策略 1: 单个 box 精确匹配
+        for box in boxes[first_unused_idx:]:
+            if box.get('used'):
+                continue
+            
+            box_text = self.text_matcher.normalize_text(box['text'])
+            
+            if cell_text_normalized == box_text:
+                return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
+        
+        # 🔑 策略 2: 多个 boxes 合并匹配
+        unused_boxes = [b for b in boxes if not b.get('used')]
+        # 合并同列的 boxes 合并
+        merged_bboxes = []
+        for col_idx in range(len(col_boundaries)):
+            combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
+            if len(combo_boxes) > 0:
+                sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
+                merged_text = ''.join([b['text'] for b in sorted_combo])
+                merged_bboxes.append({
+                    'text': merged_text,
+                    'sorted_combo': sorted_combo
+                })
+
+        for box in merged_bboxes:
+            # 1. 精确匹配
+            merged_text_normalized = self.text_matcher.normalize_text(box['text'])
+            if cell_text_normalized == merged_text_normalized:
+                last_sort_idx = boxes.index(box['sorted_combo'][-1])
+                return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
+            
+            # 2. 子串匹配
+            is_substring = (cell_text_normalized in merged_text_normalized or 
+                        merged_text_normalized in cell_text_normalized)
+            
+            # 3. 模糊匹配
+            similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
+            
+            # 🎯 子串匹配加分
+            if is_substring:
+                similarity = min(100, similarity + 10)
+            
+            if similarity >= self.text_matcher.similarity_threshold:
+                print(f"         ✓ 匹配成功: '{cell_text[:15]}' vs '{merged_text[:15]}' (相似度: {similarity})")
+                return self._build_match_result(box['sorted_combo'], box['text'], similarity, start_idx)
+        
+        print(f"         ✗ 匹配失败: '{cell_text[:15]}'")
+        return None
+
+
+    def _build_match_result(self, boxes: List[Dict], text: str, 
+                        score: float, last_index: int) -> Dict:
+        """构建匹配结果(使用原始坐标)"""
+    
+        # 🔑 关键修复:使用 original_bbox(如果存在)
+        def get_original_bbox(box: Dict) -> List[int]:
+            return box.get('original_bbox', box['bbox'])
+        
+        original_bboxes = [get_original_bbox(b) for b in boxes]
+        
+        merged_bbox = [
+            min(b[0] for b in original_bboxes),
+            min(b[1] for b in original_bboxes),
+            max(b[2] for b in original_bboxes),
+            max(b[3] for b in original_bboxes)
+        ]
+        
+        return {
+            'bbox': merged_bbox,  # ✅ 使用原始坐标
+            'text': text,
+            'score': score,
+            'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
+            'used_boxes': boxes,
+            'last_used_index': last_index
+        }