"""
表格单元格匹配器
负责将 HTML 表格单元格与 PaddleOCR bbox 进行匹配
"""
from typing import List, Dict, Tuple, Optional
from bs4 import BeautifulSoup
import numpy as np
try:
from .text_matcher import TextMatcher
except ImportError:
from text_matcher import TextMatcher
class TableCellMatcher:
"""表格单元格匹配器"""
def __init__(self, text_matcher: TextMatcher,
x_tolerance: int = 3,
y_tolerance: int = 10):
"""
Args:
text_matcher: 文本匹配器
x_tolerance: X轴容差(用于列边界判断)
y_tolerance: Y轴容差(用于行分组)
"""
self.text_matcher = text_matcher
self.x_tolerance = x_tolerance
self.y_tolerance = y_tolerance
    def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
                                     start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
        """
        Add bbox info to an HTML table (optimized: pre-filter boxes to the table region).

        Strategy:
            1. Use table_bbox to keep only the paddle_text_boxes inside the table region
            2. Group the filtered boxes into rows
            3. Smart-match HTML rows against the paddle row groups
            4. Look up cells only within the matched group(s)

        Args:
            html: HTML table
            paddle_text_boxes: full list of paddle OCR results
            start_pointer: index into paddle_text_boxes to start scanning from
            table_bbox: table bounding box [x1, y1, x2, y2]

        Returns:
            (enhanced HTML string, list of matched cell dicts, new start pointer)
        """
        soup = BeautifulSoup(html, 'html.parser')
        cells = []
        # Step 1: keep only the paddle boxes that fall inside the table region
        table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
            paddle_text_boxes[start_pointer:],
            table_bbox,
            html
        )
        if not table_region_boxes:
            print(f"⚠️ 未在表格区域找到 paddle boxes")
            return str(soup), cells, start_pointer
        print(f"📊 表格区域: {len(table_region_boxes)} 个文本框")
        print(f" 边界: {actual_table_bbox}")
        # Step 2: group the table-region boxes into rows by y coordinate
        grouped_boxes = self._group_paddle_boxes_by_rows(
            table_region_boxes,
            y_tolerance=self.y_tolerance,
            auto_correct_skew=True
        )
        # Step 3: sort each group's boxes left-to-right, and the groups top-to-bottom
        for group in grouped_boxes:
            group['boxes'].sort(key=lambda x: x['bbox'][0])
        grouped_boxes.sort(key=lambda g: g['y_center'])
        print(f" 分组: {len(grouped_boxes)} 行")
        # Step 4: smart-match HTML rows to paddle row groups
        html_rows = soup.find_all('tr')
        row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
        print(f" HTML行: {len(html_rows)} 行")
        print(f" 映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
        # Step 5: walk the HTML table, using the row mapping to find candidate boxes
        for row_idx, row in enumerate(html_rows):
            group_indices = row_mapping.get(row_idx, [])
            if not group_indices:
                continue
            # merge the boxes of every group mapped to this HTML row
            current_boxes = []
            for group_idx in group_indices:
                if group_idx < len(grouped_boxes):
                    current_boxes.extend(grouped_boxes[group_idx]['boxes'])
            current_boxes.sort(key=lambda x: x['bbox'][0])
            # extract the HTML cells of this row before matching
            html_cells = row.find_all(['td', 'th'])
            if not html_cells:
                continue
            # estimate column boundaries from the x-coordinate distribution
            col_boundaries = self._estimate_column_boundaries(
                current_boxes,
                len(html_cells)
            )
            print(f" 行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
            # sequential pointer matching: cells consume boxes left to right
            box_pointer = 0  # pointer into the current row's boxes
            for col_idx, cell in enumerate(html_cells):
                cell_text = cell.get_text(strip=True)
                if not cell_text:
                    continue
                # match starting from the current pointer position
                matched_result = self._match_cell_sequential(
                    cell_text,
                    current_boxes,
                    col_boundaries,
                    box_pointer
                )
                if matched_result:
                    merged_bbox = matched_result['bbox']
                    merged_text = matched_result['text']
                    cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
                    cell['data-score'] = f"{matched_result['score']:.4f}"
                    cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
                    cells.append({
                        'type': 'table_cell',
                        'text': cell_text,
                        'matched_text': merged_text,
                        'bbox': merged_bbox,
                        'row': row_idx + 1,
                        'col': col_idx + 1,
                        'score': matched_result['score'],
                        'paddle_bbox_indices': matched_result['paddle_indices']
                    })
                    # mark consumed boxes so later cells cannot reuse them
                    for box in matched_result['used_boxes']:
                        box['used'] = True
                    # advance the pointer past the last consumed box
                    box_pointer = matched_result['last_used_index'] + 1
                    print(f" 列 {col_idx + 1}: '{cell_text[:20]}...' 匹配 {len(matched_result['used_boxes'])} 个box (指针: {box_pointer})")
        # derive the new global pointer from how many boxes were consumed
        used_count = sum(1 for box in table_region_boxes if box.get('used'))
        new_pointer = start_pointer + used_count
        print(f" 匹配: {len(cells)} 个单元格")
        return str(soup), cells, new_pointer
    def _estimate_column_boundaries(self, boxes: List[Dict],
                                    num_cols: int) -> List[Tuple[int, int]]:
        """
        Estimate column boundaries (improved: handles several text boxes per column).

        Args:
            boxes: all boxes of the current row (already sorted by x)
            num_cols: number of columns in the HTML table

        Returns:
            List of column boundaries [(x_start, x_end), ...]
        """
        if not boxes:
            return []
        # first cluster by x coordinate (merges multiple text boxes of one column)
        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
        print(f" X聚类: {len(boxes)} 个boxes -> {len(x_clusters)} 个列簇")
        # overall x extent of the clusters
        # NOTE(review): x_min/x_max are computed but not used below
        x_min = min(cluster['x_min'] for cluster in x_clusters)
        x_max = max(cluster['x_max'] for cluster in x_clusters)
        # Strategy 1: cluster count <= column count — use cluster extents directly
        if len(x_clusters) <= num_cols:
            # use the cluster boundaries as the column boundaries
            boundaries = [(cluster['x_min'], cluster['x_max'])
                          for cluster in x_clusters]
            return boundaries
        # Strategy 2: more clusters than columns (a column holds several clusters)
        if len(x_clusters) > num_cols:
            print(f" ℹ️ 聚类数 {len(x_clusters)} > 列数 {num_cols},合并相近簇")
            # merge the closest neighbouring clusters until counts line up
            merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
            boundaries = [(cluster['x_min'], cluster['x_max'])
                          for cluster in merged_clusters]
            return boundaries
        # unreachable: the two branches above are exhaustive
        return []
def _cluster_boxes_by_x(self, boxes: List[Dict],
x_tolerance: int = 3) -> List[Dict]:
"""
按 x 坐标聚类(合并同列的多个文本框)
Args:
boxes: 文本框列表
x_tolerance: X坐标容忍度
Returns:
聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
"""
if not boxes:
return []
# 按左边界 x 坐标排序
sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
clusters = []
current_cluster = None
for box in sorted_boxes:
bbox = box['bbox']
x_start = bbox[0]
x_end = bbox[2]
if current_cluster is None:
# 开始新簇
current_cluster = {
'x_min': x_start,
'x_max': x_end,
'boxes': [box]
}
else:
# 🔑 检查是否属于当前簇(修正后的逻辑)
# 1. x 坐标有重叠:x_start <= current_x_max 且 x_end >= current_x_min
# 2. 或者距离在容忍度内
has_overlap = (x_start <= current_cluster['x_max'] and
x_end >= current_cluster['x_min'])
is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
if has_overlap or is_close:
# 合并到当前簇
current_cluster['boxes'].append(box)
current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
else:
# 保存当前簇,开始新簇
clusters.append(current_cluster)
current_cluster = {
'x_min': x_start,
'x_max': x_end,
'boxes': [box]
}
# 添加最后一簇
if current_cluster:
clusters.append(current_cluster)
return clusters
def _merge_close_clusters(self, clusters: List[Dict],
target_count: int) -> List[Dict]:
"""
合并相近的簇,直到数量等于目标列数
Args:
clusters: 聚类列表
target_count: 目标列数
Returns:
合并后的聚类列表
"""
if len(clusters) <= target_count:
return clusters
# 复制一份,避免修改原数据
working_clusters = [c.copy() for c in clusters]
while len(working_clusters) > target_count:
# 找到距离最近的两个簇
min_distance = float('inf')
merge_idx = 0
for i in range(len(working_clusters) - 1):
distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
if distance < min_distance:
min_distance = distance
merge_idx = i
# 合并
cluster1 = working_clusters[merge_idx]
cluster2 = working_clusters[merge_idx + 1]
merged_cluster = {
'x_min': cluster1['x_min'],
'x_max': cluster2['x_max'],
'boxes': cluster1['boxes'] + cluster2['boxes']
}
# 替换
working_clusters[merge_idx] = merged_cluster
working_clusters.pop(merge_idx + 1)
return working_clusters
def _get_boxes_in_column(self, boxes: List[Dict],
boundaries: List[Tuple[int, int]],
col_idx: int) -> List[Dict]:
"""
获取指定列范围内的 boxes(改进版:包含重叠)
Args:
boxes: 当前行的所有 boxes
boundaries: 列边界
col_idx: 列索引
Returns:
该列的 boxes
"""
if col_idx >= len(boundaries):
return []
x_start, x_end = boundaries[col_idx]
col_boxes = []
for box in boxes:
bbox = box['bbox']
box_x_start = bbox[0]
box_x_end = bbox[2]
# 🔑 改进:检查是否有重叠(不只是中心点)
overlap = not (box_x_start > x_end or box_x_end < x_start)
if overlap:
col_boxes.append(box)
return col_boxes
    def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
                                      table_bbox: Optional[List[int]],
                                      html: str) -> Tuple[List[Dict], List[int]]:
        """
        Filter the paddle boxes down to those inside the table region.

        Strategy:
            1. With a table_bbox: filter by the (margin-expanded) bounding box
            2. Without one (or when that finds nothing): infer the region by
               matching box text against the HTML cell contents (exact, then
               fuzzy), and finally fall back to returning every box

        Args:
            paddle_boxes: paddle OCR results
            table_bbox: table bounding box [x1, y1, x2, y2]
            html: HTML content (used for content-based inference)

        Returns:
            (filtered boxes, actual table bounding box)
        """
        if not paddle_boxes:
            return [], [0, 0, 0, 0]
        # Strategy 1: use the supplied table_bbox, expanded by a margin
        if table_bbox and len(table_bbox) == 4:
            x1, y1, x2, y2 = table_bbox
            # expand the boundary to catch text just outside the table border
            margin = 20
            expanded_bbox = [
                max(0, x1 - margin),
                max(0, y1 - margin),
                x2 + margin,
                y2 + margin
            ]
            filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                # keep boxes whose centre lies inside the expanded region
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                        expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    filtered.append(box)
            if filtered:
                # tight bounding box around what was actually found
                actual_bbox = [
                    min(b['bbox'][0] for b in filtered),
                    min(b['bbox'][1] for b in filtered),
                    max(b['bbox'][2] for b in filtered),
                    max(b['bbox'][3] for b in filtered)
                ]
                return filtered, actual_bbox
        # Strategy 2: infer the table region by content matching
        print(" ℹ️ 无 table_bbox,使用内容匹配推断表格区域...")
        # collect the normalized text of every HTML cell
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, 'html.parser')
        html_texts = set()
        for cell in soup.find_all(['td', 'th']):
            text = cell.get_text(strip=True)
            if text:
                html_texts.add(self.text_matcher.normalize_text(text))
        if not html_texts:
            return [], [0, 0, 0, 0]
        # find boxes whose text matches some HTML cell content
        matched_boxes = []
        for box in paddle_boxes:
            normalized_text = self.text_matcher.normalize_text(box['text'])
            # substring containment in either direction counts as a match
            if any(normalized_text in ht or ht in normalized_text
                   for ht in html_texts):
                matched_boxes.append(box)
        if not matched_boxes:
            # fallback: exact matching failed, try fuzzy matching
            print(" ℹ️ 精确匹配失败,尝试模糊匹配...")
            from fuzzywuzzy import fuzz
            for box in paddle_boxes:
                normalized_text = self.text_matcher.normalize_text(box['text'])
                for ht in html_texts:
                    similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # lowered threshold for the fallback
                        matched_boxes.append(box)
                        break
        if matched_boxes:
            # bounding box of the matched boxes
            actual_bbox = [
                min(b['bbox'][0] for b in matched_boxes),
                min(b['bbox'][1] for b in matched_boxes),
                max(b['bbox'][2] for b in matched_boxes),
                max(b['bbox'][3] for b in matched_boxes)
            ]
            # expand the boundary to include text the matching may have missed
            margin = 30
            expanded_bbox = [
                max(0, actual_bbox[0] - margin),
                max(0, actual_bbox[1] - margin),
                actual_bbox[2] + margin,
                actual_bbox[3] + margin
            ]
            # re-filter against the expanded region so boundary text is included
            final_filtered = []
            for box in paddle_boxes:
                bbox = box['bbox']
                box_center_x = (bbox[0] + bbox[2]) / 2
                box_center_y = (bbox[1] + bbox[3]) / 2
                if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
                        expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
                    final_filtered.append(box)
            return final_filtered, actual_bbox
        # last resort: return every box
        print(" ⚠️ 无法确定表格区域,使用所有 paddle boxes")
        if paddle_boxes:
            actual_bbox = [
                min(b['bbox'][0] for b in paddle_boxes),
                min(b['bbox'][1] for b in paddle_boxes),
                max(b['bbox'][2] for b in paddle_boxes),
                max(b['bbox'][3] for b in paddle_boxes)
            ]
            return paddle_boxes, actual_bbox
        return [], [0, 0, 0, 0]
    def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict],
                                    y_tolerance: int = 10,
                                    auto_correct_skew: bool = True) -> List[Dict]:
        """
        Group paddle_text_boxes into rows by y coordinate (clustering) — enhanced.

        Args:
            paddle_boxes: Paddle OCR text boxes
            y_tolerance: y-coordinate tolerance in pixels
            auto_correct_skew: whether to auto-detect and correct document skew

        Returns:
            Group list; each entry is {'y_center': float, 'boxes': List[Dict]}
        """
        if not paddle_boxes:
            return []
        # Step 1: detect and correct skew before grouping
        if auto_correct_skew:
            rotation_angle = self._calculate_rotation_angle_from_polys(paddle_boxes)
            if abs(rotation_angle) > 0.5:  # only correct when skew exceeds 0.5°
                # estimate the image size from the extent of the boxes
                max_x = max(box['bbox'][2] for box in paddle_boxes)
                max_y = max(box['bbox'][3] for box in paddle_boxes)
                image_size = (max_x, max_y)
                print(f" 🔧 校正倾斜角度: {rotation_angle:.2f}°")
                paddle_boxes = self._correct_bbox_skew(paddle_boxes, -rotation_angle, image_size)
        # Step 2: group by the (corrected) y centre
        boxes_with_y = []
        for box in paddle_boxes:
            bbox = box['bbox']
            y_center = (bbox[1] + bbox[3]) / 2
            boxes_with_y.append({
                'y_center': y_center,
                'box': box
            })
        # sort by y so clustering is a single top-to-bottom sweep
        boxes_with_y.sort(key=lambda x: x['y_center'])
        # cluster into rows
        groups = []
        current_group = None
        # NOTE(review): tolerance could be tightened after skew correction
        # effective_tolerance = y_tolerance if auto_correct_skew else y_tolerance * 1.5
        for item in boxes_with_y:
            if current_group is None:
                # start a new group
                current_group = {
                    'y_center': item['y_center'],
                    'boxes': [item['box']]
                }
            else:
                if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
                    current_group['boxes'].append(item['box'])
                    # recompute the group's running centre over all members
                    current_group['y_center'] = sum(
                        (b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
                    ) / len(current_group['boxes'])
                else:
                    # too far from the current row: close it and start a new one
                    groups.append(current_group)
                    current_group = {
                        'y_center': item['y_center'],
                        'boxes': [item['box']]
                    }
        if current_group:
            groups.append(current_group)
        print(f" ✓ 分组完成: {len(groups)} 行")
        return groups
    def _calculate_rotation_angle_from_polys(self, paddle_boxes: List[Dict],
                                             sample_ratio: float = 0.5,
                                             outlier_threshold: float = 0.3) -> float:
        """
        Estimate the document skew angle (degrees) from the dt_polys — robust version.

        Pipeline: collect per-line angles from each polygon's top edge, keep the
        middle band of lines by y, take the median, drop outliers, then return a
        width-weighted average.

        Args:
            paddle_boxes: Paddle OCR results (each may carry a 'poly')
            sample_ratio: fraction of lines (middle band by y) to sample
            outlier_threshold: max |angle - median| (radians) to keep a line

        Returns:
            Skew angle in degrees (0.0 when there are not enough reliable samples).
        """
        if not paddle_boxes:
            return 0.0
        # Step 1: collect the skew angle of each text line
        line_angles = []
        for box in paddle_boxes:
            poly = box.get('poly', [])
            if len(poly) < 4:
                continue
            # the two points of the polygon's top edge
            x1, y1 = poly[0]
            x2, y2 = poly[1]
            # width and height of the line
            width = abs(x2 - x1)
            height = abs(poly[2][1] - y1)
            # filters: short or vertical text gives unreliable angles
            if width < 50:  # too short to be reliable
                continue
            if width < height * 0.5:  # vertical text
                continue
            # account for the image coordinate system (y axis points down)
            dx = x2 - x1
            dy = y2 - y1
            if abs(dx) > 10:
                # negate arctan2 to convert to the image's y-down convention:
                # a line sloping down-right (dy > 0) yields a negative angle
                angle_rad = -np.arctan2(dy, dx)
                # keep only small skews (-15° .. +15°)
                if abs(angle_rad) < np.radians(15):
                    line_angles.append({
                        'angle': angle_rad,
                        'weight': width,  # longer text lines weigh more
                        'y_center': (y1 + poly[2][1]) / 2
                    })
        if len(line_angles) < 5:
            print(" ⚠️ 有效样本不足,跳过倾斜校正")
            return 0.0
        # Step 2: sort by y and keep only the middle band of lines
        line_angles.sort(key=lambda x: x['y_center'])
        start_idx = int(len(line_angles) * (1 - sample_ratio) / 2)
        end_idx = int(len(line_angles) * (1 + sample_ratio) / 2)
        sampled_angles = line_angles[start_idx:end_idx]
        # Step 3: median angle as the initial estimate
        raw_angles = [item['angle'] for item in sampled_angles]
        median_angle = np.median(raw_angles)
        # Step 4: drop outliers far from the median
        filtered_angles = []
        for item in sampled_angles:
            if abs(item['angle'] - median_angle) < outlier_threshold:
                filtered_angles.append(item)
        if len(filtered_angles) < 3:
            print(" ⚠️ 过滤后样本不足")
            return np.degrees(median_angle)
        # Step 5: weighted average (longer text lines weigh more)
        total_weight = sum(item['weight'] for item in filtered_angles)
        weighted_angle = sum(
            item['angle'] * item['weight'] for item in filtered_angles
        ) / total_weight
        angle_deg = np.degrees(weighted_angle)
        print(f" 📐 倾斜角度检测:")
        print(f" • 原始样本: {len(line_angles)} 个")
        print(f" • 中间采样: {len(sampled_angles)} 个")
        print(f" • 过滤后: {len(filtered_angles)} 个")
        print(f" • 中位数角度: {np.degrees(median_angle):.3f}°")
        print(f" • 加权平均: {angle_deg:.3f}°")
        return angle_deg
def _rotate_point(self, point: Tuple[float, float],
angle_deg: float,
center: Tuple[float, float] = (0, 0)) -> Tuple[float, float]:
"""
旋转点坐标
Args:
point: 原始点 (x, y)
angle_deg: 旋转角度(度数,正值表示逆时针)
center: 旋转中心
Returns:
旋转后的点 (x', y')
"""
x, y = point
cx, cy = center
# 转换为弧度
angle_rad = np.radians(angle_deg)
# 平移到原点
x -= cx
y -= cy
# 旋转
x_new = x * np.cos(angle_rad) - y * np.sin(angle_rad)
y_new = x * np.sin(angle_rad) + y * np.cos(angle_rad)
# 平移回去
x_new += cx
y_new += cy
return (x_new, y_new)
def _correct_bbox_skew(self, paddle_boxes: List[Dict],
rotation_angle: float,
image_size: Tuple[int, int]) -> List[Dict]:
"""
校正文本框的倾斜
Args:
paddle_boxes: Paddle OCR 结果
rotation_angle: 倾斜角度
image_size: 图像尺寸 (width, height)
Returns:
校正后的文本框列表
"""
if abs(rotation_angle) < 0.1: # 倾斜角度很小,不需要校正
return paddle_boxes
width, height = image_size
center = (width / 2, height / 2)
corrected_boxes = []
for box in paddle_boxes:
poly = box.get('poly', [])
if len(poly) < 4:
corrected_boxes.append(box)
continue
# 🎯 旋转多边形的四个角点
rotated_poly = [
self._rotate_point(point, -rotation_angle, center)
for point in poly
]
# 重新计算 bbox
x_coords = [p[0] for p in rotated_poly]
y_coords = [p[1] for p in rotated_poly]
corrected_bbox = [
min(x_coords),
min(y_coords),
max(x_coords),
max(y_coords)
]
# 创建校正后的 box
corrected_box = box.copy()
corrected_box['bbox'] = corrected_bbox
corrected_box['poly'] = rotated_poly
corrected_box['original_bbox'] = box['bbox'] # 保存原始坐标
corrected_boxes.append(corrected_box)
return corrected_boxes
    def _match_html_rows_to_paddle_groups(self, html_rows: List,
                                          grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
        """
        Smart-match HTML rows to paddle groups (strict sequential matching).

        Strategy:
            1. Equal counts: simple 1:1 mapping
            2. Unequal: content matching, but keeping y-coordinate order via a
               strictly advancing group pointer
            3. Second pass: attach any unmatched groups to the nearest matched row
            4. Third pass: sort each row's group indices by y

        Returns:
            {html_row_index: [group_index, ...]}
        """
        if not html_rows or not grouped_boxes:
            return {}
        mapping = {}
        # Strategy 1: equal counts — simple 1:1 mapping
        if len(html_rows) == len(grouped_boxes):
            for i in range(len(html_rows)):
                mapping[i] = [i]
            return mapping
        # Strategy 2: content matching with a strictly monotonic group pointer
        from fuzzywuzzy import fuzz
        used_groups = set()
        next_group_to_check = 0  # global pointer: groups are consumed in order
        for row_idx, row in enumerate(html_rows):
            row_texts = [cell.get_text(strip=True) for cell in row.find_all(['td', 'th'])]
            row_texts = [t for t in row_texts if t]
            if not row_texts:
                mapping[row_idx] = []
                continue
            row_text_normalized = [self.text_matcher.normalize_text(t) for t in row_texts]
            row_combined_text = ''.join(row_text_normalized)
            best_groups = []
            best_score = 0
            # try windows of 1..max_window consecutive groups anchored at the pointer
            max_window = 5
            for group_count in range(1, max_window + 1):
                # always anchored at the pointer, not at the first unused group
                start_group = next_group_to_check
                end_group = start_group + group_count
                if end_group > len(grouped_boxes):
                    break
                combined_group_indices = list(range(start_group, end_group))
                # skip windows containing already-used groups (pointer unchanged)
                if any(idx in used_groups for idx in combined_group_indices):
                    continue
                # collect the normalized texts of every unused box in the window
                combined_texts = []
                for g_idx in combined_group_indices:
                    group_boxes = grouped_boxes[g_idx].get('boxes', [])
                    for box in group_boxes:
                        if box.get('used'):
                            continue
                        normalized_text = self.text_matcher.normalize_text(box.get('text', ''))
                        if normalized_text:
                            combined_texts.append(normalized_text)
                if not combined_texts:
                    continue
                paddle_combined_text = ''.join(combined_texts)
                # per-cell matching: exact, substring, containment, then fuzzy
                match_count = 0
                for rt in row_text_normalized:
                    if len(rt) < 2:
                        continue
                    # exact match
                    if any(rt == ct for ct in combined_texts):
                        match_count += 1
                        continue
                    # substring match
                    if any(rt in ct or ct in rt for ct in combined_texts):
                        match_count += 1
                        continue
                    # containment in the concatenated window text
                    if rt in paddle_combined_text:
                        match_count += 1
                        continue
                    # fuzzy match
                    for ct in combined_texts:
                        similarity = fuzz.partial_ratio(rt, ct)
                        if similarity >= 75:
                            match_count += 1
                            break
                # whole-row similarity as a second signal
                row_similarity = fuzz.partial_ratio(row_combined_text, paddle_combined_text)
                coverage = match_count / len(row_texts) if row_texts else 0
                combined_coverage = row_similarity / 100.0
                final_score = max(coverage, combined_coverage)
                if final_score > best_score:
                    best_score = final_score
                    best_groups = combined_group_indices
                    print(f" 行 {row_idx} 候选: 组 {combined_group_indices}, "
                          f"单元格匹配: {match_count}/{len(row_texts)}, "
                          f"整行相似度: {row_similarity}%, "
                          f"最终得分: {final_score:.2f}")
                if final_score >= 0.9:
                    break
            # accept fairly weak matches (deliberately low threshold)
            if best_groups and best_score >= 0.3:
                mapping[row_idx] = best_groups
                used_groups.update(best_groups)
                # advance the pointer past the accepted window
                next_group_to_check = max(best_groups) + 1
                print(f" ✓ 行 {row_idx}: 匹配组 {best_groups} (得分: {best_score:.2f}), "
                      f"下次从组 {next_group_to_check} 开始")
            else:
                mapping[row_idx] = []
                # even without a match, advance past one group so matching cannot stall
                if next_group_to_check < len(grouped_boxes):
                    next_group_to_check += 1
                print(f" ✗ 行 {row_idx}: 无匹配 (最佳得分: {best_score:.2f}), "
                      f"推进到组 {next_group_to_check}")
        # Strategy 3: second pass — attach the groups nobody claimed
        unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
        if unused_groups:
            print(f" ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
            # merge each unused group into the adjacent matched row
            for unused_idx in unused_groups:
                # boundary distances to the neighbouring matched groups
                unused_group = grouped_boxes[unused_idx]
                unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
                unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
                # nearest used group above and below
                above_idx = None
                below_idx = None
                above_distance = float('inf')
                below_distance = float('inf')
                # search upwards
                for i in range(unused_idx - 1, -1, -1):
                    if i in used_groups:
                        above_idx = i
                        # distance from the unused group's top edge to the
                        # centre of the above group's bottom-most box
                        above_group = grouped_boxes[i]
                        max_y_box = max(
                            above_group['boxes'],
                            key=lambda b: b['bbox'][3]
                        )
                        above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
                        above_distance = abs(unused_y_min - above_y_center)
                        print(f" • 组 {unused_idx} 与上方组 {i} 距离: {above_distance:.1f}px")
                        break
                # search downwards
                for i in range(unused_idx + 1, len(grouped_boxes)):
                    if i in used_groups:
                        below_idx = i
                        # distance from the centre of the below group's
                        # top-most box to the unused group's bottom edge
                        below_group = grouped_boxes[i]
                        min_y_box = min(
                            below_group['boxes'],
                            key=lambda b: b['bbox'][1]
                        )
                        below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
                        below_distance = abs(below_y_center - unused_y_max)
                        print(f" • 组 {unused_idx} 与下方组 {i} 距离: {below_distance:.1f}px")
                        break
                # pick the closer side
                if above_idx is not None and below_idx is not None:
                    # both exist: choose the nearer one
                    if above_distance < below_distance:
                        closest_used_idx = above_idx
                        merge_direction = "上方"
                    else:
                        closest_used_idx = below_idx
                        merge_direction = "下方"
                    print(f" ✓ 组 {unused_idx} 选择合并到{merge_direction}组 {closest_used_idx}")
                elif above_idx is not None:
                    closest_used_idx = above_idx
                    merge_direction = "上方"
                elif below_idx is not None:
                    closest_used_idx = below_idx
                    merge_direction = "下方"
                else:
                    print(f" ⚠️ 组 {unused_idx} 无相邻已使用组,跳过")
                    continue
                # find the HTML row that owns the chosen group
                target_html_row = None
                for html_row_idx, group_indices in mapping.items():
                    if closest_used_idx in group_indices:
                        target_html_row = html_row_idx
                        break
                if target_html_row is not None:
                    # append the unused group to that row's group list
                    if merge_direction == "上方":
                        # merge into the HTML row mapped above
                        if target_html_row in mapping:
                            if unused_idx not in mapping[target_html_row]:
                                mapping[target_html_row].append(unused_idx)
                                print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(上方行)")
                    else:
                        # merge into the HTML row mapped below
                        if target_html_row in mapping:
                            if unused_idx not in mapping[target_html_row]:
                                mapping[target_html_row].append(unused_idx)
                                print(f" • 组 {unused_idx} 合并到 HTML 行 {target_html_row}(下方行)")
                    used_groups.add(unused_idx)
        # Strategy 4: third pass — keep each row's groups in y order
        for row_idx in mapping:
            if mapping[row_idx]:
                mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
        return mapping
def _preprocess_close_groups(self, grouped_boxes: List[Dict],
y_gap_threshold: int = 10) -> List[List[int]]:
"""
🆕 预处理:将 y 间距很小的组预合并
Args:
grouped_boxes: 原始分组
y_gap_threshold: Y 间距阈值(小于此值认为是同一行)
Returns:
预处理后的组索引列表 [[0,1], [2], [3,4,5], ...]
"""
if not grouped_boxes:
return []
preprocessed = []
current_group = [0]
for i in range(1, len(grouped_boxes)):
prev_group = grouped_boxes[i - 1]
curr_group = grouped_boxes[i]
# 计算间距
prev_y_max = max(b['bbox'][3] for b in prev_group['boxes'])
curr_y_min = min(b['bbox'][1] for b in curr_group['boxes'])
gap = abs(curr_y_min - prev_y_max)
if gap <= y_gap_threshold:
# 间距很小,合并
current_group.append(i)
print(f" 预合并: 组 {i-1} 和 {i} (间距: {gap}px)")
else:
# 间距较大,开始新组
preprocessed.append(current_group)
current_group = [i]
# 添加最后一组
if current_group:
preprocessed.append(current_group)
return preprocessed
def _match_cell_sequential(self, cell_text: str,
boxes: List[Dict],
col_boundaries: List[Tuple[int, int]],
start_idx: int) -> Optional[Dict]:
"""
🎯 顺序匹配单元格:从指定位置开始,逐步合并 boxes 直到匹配
策略:
1. 找到第一个未使用的 box
2. 尝试单个 box 精确匹配
3. 如果失败,尝试合并多个 boxes
Args:
cell_text: HTML 单元格文本
boxes: 候选 boxes(已按 x 坐标排序)
col_boundaries: 列边界列表
start_idx: 起始索引
Returns:
{'bbox': [x1,y1,x2,y2], 'text': str, 'score': float,
'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
'last_used_index': int}
"""
from fuzzywuzzy import fuzz
cell_text_normalized = self.text_matcher.normalize_text(cell_text)
if len(cell_text_normalized) < 2:
return None
# 🔑 找到第一个未使用的 box
first_unused_idx = start_idx
while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
first_unused_idx += 1
if first_unused_idx >= len(boxes):
return None
# 🔑 策略 1: 单个 box 精确匹配
for box in boxes[first_unused_idx:]:
if box.get('used'):
continue
box_text = self.text_matcher.normalize_text(box['text'])
if cell_text_normalized == box_text:
return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
# 🔑 策略 2: 多个 boxes 合并匹配
unused_boxes = [b for b in boxes if not b.get('used')]
# 合并同列的 boxes 合并
merged_bboxes = []
for col_idx in range(len(col_boundaries)):
combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
if len(combo_boxes) > 0:
sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
merged_text = ''.join([b['text'] for b in sorted_combo])
merged_bboxes.append({
'text': merged_text,
'sorted_combo': sorted_combo
})
for box in merged_bboxes:
# 1. 精确匹配
merged_text_normalized = self.text_matcher.normalize_text(box['text'])
if cell_text_normalized == merged_text_normalized:
last_sort_idx = boxes.index(box['sorted_combo'][-1])
return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
# 2. 子串匹配
is_substring = (cell_text_normalized in merged_text_normalized or
merged_text_normalized in cell_text_normalized)
# 3. 模糊匹配
similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
# 🎯 子串匹配加分
if is_substring:
similarity = min(100, similarity + 10)
if similarity >= self.text_matcher.similarity_threshold:
print(f" ✓ 匹配成功: '{cell_text[:15]}' vs '{merged_text[:15]}' (相似度: {similarity})")
return self._build_match_result(box['sorted_combo'], box['text'], similarity, start_idx)
print(f" ✗ 匹配失败: '{cell_text[:15]}'")
return None
def _build_match_result(self, boxes: List[Dict], text: str,
score: float, last_index: int) -> Dict:
"""构建匹配结果(使用原始坐标)"""
# 🔑 关键修复:使用 original_bbox(如果存在)
def get_original_bbox(box: Dict) -> List[int]:
return box.get('original_bbox', box['bbox'])
original_bboxes = [get_original_bbox(b) for b in boxes]
merged_bbox = [
min(b[0] for b in original_bboxes),
min(b[1] for b in original_bboxes),
max(b[2] for b in original_bboxes),
max(b[3] for b in original_bboxes)
]
return {
'bbox': merged_bbox, # ✅ 使用原始坐标
'text': text,
'score': score,
'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
'used_boxes': boxes,
'last_used_index': last_index
}