"""
表格单元格匹配器
负责将 HTML 表格单元格与 PaddleOCR bbox 进行匹配
"""
import re
from collections import Counter
from typing import List, Dict, Tuple, Optional
from bs4 import BeautifulSoup
import numpy as np
try:
from rapidfuzz import fuzz
except ImportError:
from fuzzywuzzy import fuzz
try:
from .text_matcher import TextMatcher
from .bbox_extractor import BBoxExtractor
except ImportError:
from text_matcher import TextMatcher
from bbox_extractor import BBoxExtractor
class TableCellMatcher:
"""表格单元格匹配器"""
def __init__(self, text_matcher: TextMatcher,
x_tolerance: int = 3,
y_tolerance: int = 10):
"""
Args:
text_matcher: 文本匹配器
x_tolerance: X轴容差(用于列边界判断)
y_tolerance: Y轴容差(用于行分组)
"""
self.text_matcher = text_matcher
self.x_tolerance = x_tolerance
self.y_tolerance = y_tolerance
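    # Usage sketch (illustrative; assumes a TextMatcher instance exposing
    # normalize_text() and a similarity_threshold attribute, which is all this
    # class relies on):
    #
    #   matcher = TableCellMatcher(text_matcher, x_tolerance=3, y_tolerance=10)
    #   html_out, cells, ptr = matcher.enhance_table_html_with_bbox(
    #       "<table>...</table>", paddle_text_boxes, start_pointer=0,
    #       table_bbox=[50, 100, 800, 400])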
def enhance_table_html_with_bbox(self, html: str, paddle_text_boxes: List[Dict],
start_pointer: int, table_bbox: Optional[List[int]] = None) -> Tuple[str, List[Dict], int]:
"""
为 HTML 表格添加 bbox 信息(优化版:先筛选表格区域)
策略:
1. 根据 table_bbox 筛选出表格区域内的 paddle_text_boxes
2. 将筛选后的 boxes 按行分组
3. 智能匹配 HTML 行与 paddle 行组
4. 在匹配的组内查找单元格
Args:
html: HTML 表格
paddle_text_boxes: 全部 paddle OCR 结果
start_pointer: 开始位置
table_bbox: 表格边界框 [x1, y1, x2, y2]
"""
soup = BeautifulSoup(html, 'html.parser')
cells = []
        # 🔑 Step 1: filter the paddle boxes that fall inside the table region
table_region_boxes, actual_table_bbox = self._filter_boxes_in_table_region(
paddle_text_boxes[start_pointer:],
table_bbox,
html
)
        if not table_region_boxes:
            print("⚠️ No paddle boxes found in the table region")
            return str(soup), cells, start_pointer
        print(f"📊 Table region: {len(table_region_boxes)} text boxes")
        print(f"   Bounds: {actual_table_bbox}")
        # 🔑 Step 2: group the boxes in the table region into rows
grouped_boxes = self._group_paddle_boxes_by_rows(
table_region_boxes,
y_tolerance=self.y_tolerance,
auto_correct_skew=True
)
        # 🔑 Step 3: sort boxes within each group by x coordinate
for group in grouped_boxes:
group['boxes'].sort(key=lambda x: x['bbox'][0])
grouped_boxes.sort(key=lambda g: g['y_center'])
print(f" 分组: {len(grouped_boxes)} 行")
# 🔑 第四步:智能匹配 HTML 行与 paddle 行组
html_rows = soup.find_all('tr')
row_mapping = self._match_html_rows_to_paddle_groups(html_rows, grouped_boxes)
print(f" HTML行: {len(html_rows)} 行")
print(f" 映射: {len([v for v in row_mapping.values() if v])} 个有效映射")
# 🔑 第五步:遍历 HTML 表格,使用映射关系查找
for row_idx, row in enumerate(html_rows):
group_indices = row_mapping.get(row_idx, [])
if not group_indices:
continue
            # Merge the boxes of all mapped groups
current_boxes = []
for group_idx in group_indices:
if group_idx < len(grouped_boxes):
current_boxes.extend(grouped_boxes[group_idx]['boxes'])
current_boxes.sort(key=lambda x: x['bbox'][0])
            # 🎯 Key improvement: extract the HTML cells and pre-compute column boundaries
html_cells = row.find_all(['td', 'th'])
if not html_cells:
continue
            # 🔑 Estimate column boundaries from the x-coordinate distribution
col_boundaries = self._estimate_column_boundaries(
current_boxes,
len(html_cells)
)
print(f" 行 {row_idx + 1}: {len(html_cells)} 列,边界: {col_boundaries}")
# 🎯 关键改进:顺序指针匹配
box_pointer = 0 # 当前行的 boxes 指针
for col_idx, cell in enumerate(html_cells):
cell_text = cell.get_text(strip=True)
if not cell_text:
continue
                # 🔑 Match starting from the current pointer
matched_result = self._match_cell_sequential(
cell_text,
current_boxes,
col_boundaries,
box_pointer
)
if matched_result:
merged_bbox = matched_result['bbox']
merged_text = matched_result['text']
cell['data-bbox'] = f"[{merged_bbox[0]},{merged_bbox[1]},{merged_bbox[2]},{merged_bbox[3]}]"
cell['data-score'] = f"{matched_result['score']:.4f}"
cell['data-paddle-indices'] = str(matched_result['paddle_indices'])
cells.append({
'type': 'table_cell',
'text': cell_text,
'matched_text': merged_text,
'bbox': merged_bbox,
'row': row_idx + 1,
'col': col_idx + 1,
'score': matched_result['score'],
'paddle_bbox_indices': matched_result['paddle_indices']
})
                    # Mark the boxes as used
for box in matched_result['used_boxes']:
box['used'] = True
                    # 🎯 Advance the pointer past the last used box
                    box_pointer = matched_result['last_used_index'] + 1
                    print(f"    Col {col_idx + 1}: '{cell_text[:20]}...' matched {len(matched_result['used_boxes'])} boxes (pointer: {box_pointer})")
        # Compute the new pointer position
used_count = sum(1 for box in table_region_boxes if box.get('used'))
new_pointer = start_pointer + used_count
print(f" 匹配: {len(cells)} 个单元格")
return str(soup), cells, new_pointer
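    # Shape of each paddle box dict as consumed above (a sketch based on the keys
    # this class reads; 'used' is set during matching and 'original_bbox' is read
    # if skew correction produced one):
    #
    #   {'text': 'Alice', 'bbox': [x1, y1, x2, y2], 'paddle_bbox_index': 42}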
def _estimate_column_boundaries(self, boxes: List[Dict],
num_cols: int) -> List[Tuple[int, int]]:
"""
估算列边界(改进版:处理同列多文本框)
Args:
boxes: 当前行的所有 boxes(已按 x 排序)
num_cols: HTML 表格的列数
Returns:
列边界列表 [(x_start, x_end), ...]
"""
if not boxes:
return []
        # 🔑 Key improvement: cluster by x first (merges multiple text boxes in the same column)
        x_clusters = self._cluster_boxes_by_x(boxes, x_tolerance=self.x_tolerance)
        print(f"  X clustering: {len(boxes)} boxes -> {len(x_clusters)} column clusters")
        # 🎯 Strategy 1: cluster count <= column count
if len(x_clusters) <= num_cols:
            # Use the cluster bounds directly
boundaries = [(cluster['x_min'], cluster['x_max'])
for cluster in x_clusters]
return boundaries
        # 🎯 Strategy 2: more clusters than columns (some columns contain several text clusters)
        if len(x_clusters) > num_cols:
            print(f"  ℹ️ {len(x_clusters)} clusters > {num_cols} columns; merging close clusters")
            # Merge nearby clusters
merged_clusters = self._merge_close_clusters(x_clusters, num_cols)
boundaries = [(cluster['x_min'], cluster['x_max'])
for cluster in merged_clusters]
return boundaries
return []
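    # Illustrative example (hypothetical coordinates): boxes at x-ranges
    # [10, 60], [55, 90], [200, 260], [400, 450] with num_cols=3 cluster to
    # [10, 90], [200, 260], [400, 450] (the first two overlap), which already
    # matches num_cols, so those cluster bounds are returned as boundaries.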
def _cluster_boxes_by_x(self, boxes: List[Dict],
x_tolerance: int = 3) -> List[Dict]:
"""
按 x 坐标聚类(合并同列的多个文本框)
Args:
boxes: 文本框列表
x_tolerance: X坐标容忍度
Returns:
聚类列表 [{'x_min': int, 'x_max': int, 'boxes': List[Dict]}, ...]
"""
if not boxes:
return []
        # Sort by left x coordinate
sorted_boxes = sorted(boxes, key=lambda b: b['bbox'][0])
clusters = []
current_cluster = None
for box in sorted_boxes:
bbox = box['bbox']
x_start = bbox[0]
x_end = bbox[2]
if current_cluster is None:
                # Start a new cluster
current_cluster = {
'x_min': x_start,
'x_max': x_end,
'boxes': [box]
}
else:
                # 🔑 Does this box belong to the current cluster? (corrected logic)
                # 1. x ranges overlap: x_start <= cluster x_max and x_end >= cluster x_min
                # 2. or the gap is within tolerance
has_overlap = (x_start <= current_cluster['x_max'] and
x_end >= current_cluster['x_min'])
is_close = abs(x_start - current_cluster['x_max']) <= x_tolerance
if has_overlap or is_close:
                    # Merge into the current cluster
current_cluster['boxes'].append(box)
current_cluster['x_min'] = min(current_cluster['x_min'], x_start)
current_cluster['x_max'] = max(current_cluster['x_max'], x_end)
else:
                    # Close the current cluster and start a new one
clusters.append(current_cluster)
current_cluster = {
'x_min': x_start,
'x_max': x_end,
'boxes': [box]
}
        # Append the final cluster
if current_cluster:
clusters.append(current_cluster)
return clusters
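    # Worked example (hypothetical boxes): with x_tolerance=3, x-ranges
    # [10, 60], [58, 90], [92, 120], [300, 350] form two clusters: [10, 120]
    # (overlap merges the first pair, then |92 - 90| <= 3 pulls in the third
    # box) and [300, 350].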
def _merge_close_clusters(self, clusters: List[Dict],
target_count: int) -> List[Dict]:
"""
合并相近的簇,直到数量等于目标列数
Args:
clusters: 聚类列表
target_count: 目标列数
Returns:
合并后的聚类列表
"""
if len(clusters) <= target_count:
return clusters
        # Work on copies so the input is not mutated
working_clusters = [c.copy() for c in clusters]
while len(working_clusters) > target_count:
            # Find the two closest adjacent clusters
min_distance = float('inf')
merge_idx = 0
for i in range(len(working_clusters) - 1):
distance = working_clusters[i + 1]['x_min'] - working_clusters[i]['x_max']
if distance < min_distance:
min_distance = distance
merge_idx = i
            # Merge them
cluster1 = working_clusters[merge_idx]
cluster2 = working_clusters[merge_idx + 1]
merged_cluster = {
'x_min': cluster1['x_min'],
'x_max': cluster2['x_max'],
'boxes': cluster1['boxes'] + cluster2['boxes']
}
            # Replace the pair with the merged cluster
working_clusters[merge_idx] = merged_cluster
working_clusters.pop(merge_idx + 1)
return working_clusters
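    # Worked example (hypothetical): clusters [0, 50], [60, 100], [300, 350]
    # with target_count=2 merge the closest adjacent pair (gap 60 - 50 = 10)
    # into [0, 100], leaving [[0, 100], [300, 350]].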
def _get_boxes_in_column(self, boxes: List[Dict],
boundaries: List[Tuple[int, int]],
col_idx: int) -> List[Dict]:
"""
获取指定列范围内的 boxes(改进版:包含重叠)
Args:
boxes: 当前行的所有 boxes
boundaries: 列边界
col_idx: 列索引
Returns:
该列的 boxes
"""
if col_idx >= len(boundaries):
return []
x_start, x_end = boundaries[col_idx]
col_boxes = []
for box in boxes:
bbox = box['bbox']
box_x_start = bbox[0]
box_x_end = bbox[2]
            # 🔑 Improved: test for range overlap (not just the center point)
overlap = not (box_x_start > x_end or box_x_end < x_start)
if overlap:
col_boxes.append(box)
return col_boxes
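    # The overlap test above is the standard interval-intersection check
    # 'not (a_start > b_end or a_end < b_start)': for a column range [100, 200],
    # a box spanning [180, 260] is kept while one spanning [210, 260] is not.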
def _filter_boxes_in_table_region(self, paddle_boxes: List[Dict],
table_bbox: Optional[List[int]],
html: str) -> Tuple[List[Dict], List[int]]:
"""
筛选表格区域内的 paddle boxes
策略:
1. 如果有 table_bbox,使用边界框筛选(扩展边界)
2. 如果没有 table_bbox,通过内容匹配推断区域
Args:
paddle_boxes: paddle OCR 结果
table_bbox: 表格边界框 [x1, y1, x2, y2]
html: HTML 内容(用于内容验证)
Returns:
(筛选后的 boxes, 实际表格边界框)
"""
if not paddle_boxes:
return [], [0, 0, 0, 0]
        # 🎯 Strategy 1: use the provided table_bbox (with expanded bounds)
if table_bbox and len(table_bbox) == 4:
x1, y1, x2, y2 = table_bbox
            # Expand the bounds (text can sit just outside the table border)
margin = 20
expanded_bbox = [
max(0, x1 - margin),
max(0, y1 - margin),
x2 + margin,
y2 + margin
]
filtered = []
for box in paddle_boxes:
bbox = box['bbox']
box_center_x = (bbox[0] + bbox[2]) / 2
box_center_y = (bbox[1] + bbox[3]) / 2
                # Keep boxes whose center lies inside the expanded region
if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
filtered.append(box)
if filtered:
                # Compute the actual bounding box
actual_bbox = [
min(b['bbox'][0] for b in filtered),
min(b['bbox'][1] for b in filtered),
max(b['bbox'][2] for b in filtered),
max(b['bbox'][3] for b in filtered)
]
return filtered, actual_bbox
        # 🎯 Strategy 2: infer the region via content matching
        print("  ℹ️ No table_bbox; inferring the table region via content matching...")
        # Extract all cell texts from the HTML
soup = BeautifulSoup(html, 'html.parser')
html_texts = set()
for cell in soup.find_all(['td', 'th']):
text = cell.get_text(strip=True)
if text:
html_texts.add(self.text_matcher.normalize_text(text))
if not html_texts:
return [], [0, 0, 0, 0]
        # Find the boxes whose text matches the HTML content
matched_boxes = []
for box in paddle_boxes:
normalized_text = self.text_matcher.normalize_text(box['text'])
            # Substring containment in either direction counts as a match
if any(normalized_text in ht or ht in normalized_text
for ht in html_texts):
matched_boxes.append(box)
if not matched_boxes:
            # 🔑 Fallback: if exact matching fails, try fuzzy matching
            print("  ℹ️ Exact matching failed; trying fuzzy matching...")
for box in paddle_boxes:
normalized_text = self.text_matcher.normalize_text(box['text'])
for ht in html_texts:
similarity = fuzz.partial_ratio(normalized_text, ht)
                    if similarity >= 70:  # lowered threshold
matched_boxes.append(box)
break
if matched_boxes:
            # Compute the bounding box
actual_bbox = [
min(b['bbox'][0] for b in matched_boxes),
min(b['bbox'][1] for b in matched_boxes),
max(b['bbox'][2] for b in matched_boxes),
max(b['bbox'][3] for b in matched_boxes)
]
            # 🔑 Expand the bounds to catch text that may have been missed
margin = 30
expanded_bbox = [
max(0, actual_bbox[0] - margin),
max(0, actual_bbox[1] - margin),
actual_bbox[2] + margin,
actual_bbox[3] + margin
]
            # Re-filter (now including text on the boundary)
final_filtered = []
for box in paddle_boxes:
bbox = box['bbox']
box_center_x = (bbox[0] + bbox[2]) / 2
box_center_y = (bbox[1] + bbox[3]) / 2
if (expanded_bbox[0] <= box_center_x <= expanded_bbox[2] and
expanded_bbox[1] <= box_center_y <= expanded_bbox[3]):
final_filtered.append(box)
return final_filtered, actual_bbox
        # 🔑 Last resort: return all boxes
        print("  ⚠️ Could not determine the table region; using all paddle boxes")
if paddle_boxes:
actual_bbox = [
min(b['bbox'][0] for b in paddle_boxes),
min(b['bbox'][1] for b in paddle_boxes),
max(b['bbox'][2] for b in paddle_boxes),
max(b['bbox'][3] for b in paddle_boxes)
]
return paddle_boxes, actual_bbox
return [], [0, 0, 0, 0]
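    # Example (hypothetical): table_bbox=[100, 100, 500, 300] expands to
    # [80, 80, 520, 320] with margin=20; a box with bbox [95, 95, 150, 115]
    # (center (122.5, 105)) is kept, while one centered at (60, 200) is dropped.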
def _group_paddle_boxes_by_rows(self, paddle_boxes: List[Dict],
y_tolerance: int = 10,
auto_correct_skew: bool = True) -> List[Dict]:
"""
将 paddle_text_boxes 按 y 坐标分组(聚类)- 增强版本
Args:
paddle_boxes: Paddle OCR 文字框列表
y_tolerance: Y 坐标容忍度(像素)
auto_correct_skew: 是否自动校正倾斜
Returns:
分组列表,每组包含 {'y_center': float, 'boxes': List[Dict]}
"""
if not paddle_boxes:
return []
        # 🎯 Step 1: detect and correct skew (via BBoxExtractor)
if auto_correct_skew:
rotation_angle = BBoxExtractor.calculate_skew_angle(paddle_boxes)
if abs(rotation_angle) > 0.5:
max_x = max(box['bbox'][2] for box in paddle_boxes)
max_y = max(box['bbox'][3] for box in paddle_boxes)
image_size = (max_x, max_y)
print(f" 🔧 校正倾斜角度: {rotation_angle:.2f}°")
paddle_boxes = BBoxExtractor.correct_boxes_skew(
paddle_boxes, -rotation_angle, image_size
)
        # 🎯 Step 2: group by the corrected y coordinates
boxes_with_y = []
for box in paddle_boxes:
bbox = box['bbox']
y_center = (bbox[1] + bbox[3]) / 2
boxes_with_y.append({
'y_center': y_center,
'box': box
})
        # Sort by y coordinate
boxes_with_y.sort(key=lambda x: x['y_center'])
groups = []
current_group = None
for item in boxes_with_y:
if current_group is None:
                # Start a new group
current_group = {
'y_center': item['y_center'],
'boxes': [item['box']]
}
else:
if abs(item['y_center'] - current_group['y_center']) <= y_tolerance:
current_group['boxes'].append(item['box'])
                    # Update the group's center
current_group['y_center'] = sum(
(b['bbox'][1] + b['bbox'][3]) / 2 for b in current_group['boxes']
) / len(current_group['boxes'])
else:
groups.append(current_group)
current_group = {
'y_center': item['y_center'],
'boxes': [item['box']]
}
if current_group:
groups.append(current_group)
print(f" ✓ 分组完成: {len(groups)} 行")
return groups
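    # Worked example (hypothetical): with y_tolerance=10, boxes whose y-centers
    # are 20, 22, 60, 62 form two groups. After the second box joins the first
    # group, its center is re-averaged to 21, so the box at y=60 starts a new
    # group because |60 - 21| > 10.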
def _match_html_rows_to_paddle_groups(self, html_rows: List,
grouped_boxes: List[Dict]) -> Dict[int, List[int]]:
"""
智能匹配 HTML 行与 paddle 分组(增强版 DP:支持跳过 HTML 行,防止链条断裂)
"""
if not html_rows or not grouped_boxes:
return {}
mapping = {}
        # 🎯 Strategy 1: equal counts, simple 1:1 mapping
if len(html_rows) == len(grouped_boxes):
for i in range(len(html_rows)):
mapping[i] = [i]
return mapping
        # --- Prepare data ---
        # Extract the text of each HTML row
html_row_texts = []
for row in html_rows:
cells = row.find_all(['td', 'th'])
texts = [self.text_matcher.normalize_text(c.get_text(strip=True)) for c in cells]
html_row_texts.append("".join(texts))
        # Precompute the concatenated text of each group
group_texts = []
for group in grouped_boxes:
boxes = group['boxes']
texts = [self.text_matcher.normalize_text(b['text']) for b in boxes]
group_texts.append("".join(texts))
n_html = len(html_row_texts)
n_paddle = len(grouped_boxes)
        # ⚡️ Optimization 3: precompute merged texts for runs of up to MAX_MERGE consecutive groups
        MAX_MERGE = 4
merged_cache = {}
for j in range(n_paddle):
current_t = ""
for k in range(MAX_MERGE):
if j + k < n_paddle:
current_t += group_texts[j + k]
merged_cache[(j, k + 1)] = current_t
else:
break
        # --- Dynamic programming (DP) ---
        # dp[i][j]: max score with HTML rows 0..i processed and row i ending at paddle group j.
        # Transition: dp[i][end_j] = max over (prev_j, start_j = prev_j + 1 + gap, count) of
        #   dp[i-1][prev_j] + similarity - gap * SKIP_PADDLE_PENALTY - len_penalty,
        # or dp[i-1][j] - SKIP_HTML_PENALTY when row i is skipped.
        # Initialize to -inf
        dp = np.full((n_html, n_paddle), -np.inf)
        # Backtracking info: path[(i, j)] = (prev_j, start_j)
        # prev_j: the paddle index where the previous row ended
        # start_j: the paddle index where this row starts (a row may span several groups)
        path = {}
        # Tuning parameters
        SEARCH_WINDOW = 15  # forward search window
        SKIP_PADDLE_PENALTY = 0.1  # penalty for skipping a paddle group
        SKIP_HTML_PENALTY = 0.3  # key: penalty for skipping an HTML row
        # --- 1. Initialize the first row ---
        # Option A: match it against paddle groups
for end_j in range(min(n_paddle, SEARCH_WINDOW + MAX_MERGE)):
for count in range(1, MAX_MERGE + 1):
start_j = end_j - count + 1
if start_j < 0: continue
current_text = merged_cache.get((start_j, count), "")
similarity = self._calculate_similarity(html_row_texts[0], current_text)
penalty = start_j * SKIP_PADDLE_PENALTY
score = similarity - penalty
                # Keep only states with a reasonable score
if score > 0.1:
if score > dp[0][end_j]:
dp[0][end_j] = score
path[(0, end_j)] = (-1, start_j)
        # Option B: skip the first row entirely. Rare, and awkward to encode as
        # dp[0][j] since no paddle group is consumed; we simplify by requiring the
        # first row to match something and let later rows correct the path.
        # --- 2. State transitions ---
for i in range(1, n_html):
html_text = html_row_texts[i]
            # All valid end positions from the previous row
valid_prev_indices = [j for j in range(n_paddle) if dp[i-1][j] > -np.inf]
            # Prune to the 30 best states
if len(valid_prev_indices) > 30:
valid_prev_indices.sort(key=lambda j: dp[i-1][j], reverse=True)
valid_prev_indices = valid_prev_indices[:30]
            # 🛡️ Key fix: allow skipping the current HTML row (inherit the previous state)
            # Skipping leaves the paddle pointer j unchanged
for prev_j in valid_prev_indices:
score_skip = dp[i-1][prev_j] - SKIP_HTML_PENALTY
if score_skip > dp[i][prev_j]:
dp[i][prev_j] = score_skip
                    # Path note: start_j = prev_j + 1 encodes "no new group consumed" (empty range)
path[(i, prev_j)] = (prev_j, prev_j + 1)
            # Empty rows only inherit state; skip the matching work
if not html_text:
continue
            # Regular matching
for prev_j in valid_prev_indices:
prev_score = dp[i-1][prev_j]
max_gap = min(SEARCH_WINDOW, n_paddle - prev_j - 1)
for gap in range(max_gap):
start_j = prev_j + 1 + gap
for count in range(1, MAX_MERGE + 1):
end_j = start_j + count - 1
if end_j >= n_paddle: break
current_text = merged_cache.get((start_j, count), "")
                        # Length pre-filter
h_len = len(html_text)
p_len = len(current_text)
if h_len > 10 and p_len < h_len * 0.2:
continue
similarity = self._calculate_similarity(html_text, current_text)
                        # Penalties:
                        # 1. skip penalty (gap)
                        # 2. length penalty (discourages over-merging)
len_penalty = 0.0
if h_len > 0:
ratio = p_len / h_len
if ratio > 2.0: len_penalty = (ratio - 2.0) * 0.2
current_score = similarity - (gap * SKIP_PADDLE_PENALTY) - len_penalty
                        # Only transition on a positive gain
if current_score > 0.1:
total_score = prev_score + current_score
if total_score > dp[i][end_j]:
dp[i][end_j] = total_score
path[(i, end_j)] = (prev_j, start_j)
        # --- 3. Backtrack the best path ---
        # Find the highest-scoring end position of the last matched row
        best_end_j = -1
        max_score = -np.inf
        # Prefer the last row; if it never matched, walk backwards
found_end = False
for i in range(n_html - 1, -1, -1):
for j in range(n_paddle):
if dp[i][j] > max_score:
max_score = dp[i][j]
best_end_j = j
best_last_row = i
if max_score > -np.inf:
found_end = True
break
mapping = {}
used_groups = set()
if found_end:
curr_i = best_last_row
curr_j = best_end_j
while curr_i >= 0:
if (curr_i, curr_j) in path:
prev_j, start_j = path[(curr_i, curr_j)]
                    # start_j <= curr_j means paddle groups were consumed;
                    # start_j > curr_j means this HTML row was skipped (empty range)
if start_j <= curr_j:
indices = list(range(start_j, curr_j + 1))
mapping[curr_i] = indices
used_groups.update(indices)
else:
mapping[curr_i] = []
curr_j = prev_j
curr_i -= 1
else:
break
        # Fill in rows that were never matched
for i in range(n_html):
if i not in mapping:
mapping[i] = []
        # --- 4. Post-processing: attach orphaned (unmatched) groups ---
unused_groups = [i for i in range(len(grouped_boxes)) if i not in used_groups]
if unused_groups:
print(f" ℹ️ 发现 {len(unused_groups)} 个未匹配的 paddle 组: {unused_groups}")
for unused_idx in unused_groups:
unused_group = grouped_boxes[unused_idx]
unused_y_min = min(b['bbox'][1] for b in unused_group['boxes'])
unused_y_max = max(b['bbox'][3] for b in unused_group['boxes'])
above_idx = None
below_idx = None
above_distance = float('inf')
below_distance = float('inf')
for i in range(unused_idx - 1, -1, -1):
if i in used_groups:
above_idx = i
above_group = grouped_boxes[i]
max_y_box = max(above_group['boxes'], key=lambda b: b['bbox'][3])
above_y_center = (max_y_box['bbox'][1] + max_y_box['bbox'][3]) / 2
above_distance = abs(unused_y_min - above_y_center)
break
for i in range(unused_idx + 1, len(grouped_boxes)):
if i in used_groups:
below_idx = i
below_group = grouped_boxes[i]
min_y_box = min(below_group['boxes'], key=lambda b: b['bbox'][1])
below_y_center = (min_y_box['bbox'][1] + min_y_box['bbox'][3]) / 2
below_distance = abs(below_y_center - unused_y_max)
break
                closest_used_idx = None
                merge_direction = ""
                if above_idx is not None and below_idx is not None:
                    if above_distance < below_distance:
                        closest_used_idx = above_idx
                        merge_direction = "above"
                    else:
                        closest_used_idx = below_idx
                        merge_direction = "below"
                elif above_idx is not None:
                    closest_used_idx = above_idx
                    merge_direction = "above"
                elif below_idx is not None:
                    closest_used_idx = below_idx
                    merge_direction = "below"
if closest_used_idx is not None:
target_html_row = None
for html_row_idx, group_indices in mapping.items():
if closest_used_idx in group_indices:
target_html_row = html_row_idx
break
                    if target_html_row is not None:
                        if unused_idx not in mapping[target_html_row]:
                            mapping[target_html_row].append(unused_idx)
                            mapping[target_html_row].sort()
                        print(f"    • Group {unused_idx} merged into HTML row {target_html_row} ({merge_direction} row)")
                        used_groups.add(unused_idx)
        # 🔑 Final pass: sort each row's group indices by y coordinate
for row_idx in mapping:
if mapping[row_idx]:
mapping[row_idx].sort(key=lambda idx: grouped_boxes[idx]['y_center'])
return mapping
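    # Example outcome (hypothetical): 5 HTML rows against 6 paddle groups might
    # yield {0: [0], 1: [1, 2], 2: [], 3: [3], 4: [4]} - row 1 merged two
    # groups and row 2 was skipped; orphan group 5 would then be attached to
    # its nearest matched neighbour by the post-processing pass above.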
def _calculate_similarity(self, text1: str, text2: str) -> float:
"""
计算两个文本的相似度,结合字符覆盖率和序列相似度 (性能优化版)
"""
if not text1 or not text2:
return 0.0
len1, len2 = len(text1), len(text2)
        # ⚡️ Optimization 1: quick length check.
        # If lengths differ wildly (e.g. 50 chars vs 2), treat as no match.
        min_l, max_l = min(len1, len2), max(len1, len2)
        if max_l > 10 and min_l / max_l < 0.2:
            return 0.0
        # 1. Character coverage (multiset overlap)
c1 = Counter(text1)
c2 = Counter(text2)
intersection = c1 & c2
overlap_count = sum(intersection.values())
coverage = overlap_count / len1 if len1 > 0 else 0
        # ⚡️ Optimization 2: skip the expensive fuzz call when coverage is low.
        # Under 30% character overlap the texts are essentially unrelated.
if coverage < 0.3:
return coverage * 0.7
        # 2. Sequence similarity
        # token_sort_ratio tolerates some token reordering
seq_score = fuzz.token_sort_ratio(text1, text2) / 100.0
return (coverage * 0.7) + (seq_score * 0.3)
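    # Worked example: text1="abc" and text2="abd" share the character multiset
    # {a, b}, so coverage = 2/3. fuzz.token_sort_ratio("abc", "abd") is about
    # 67, giving roughly 0.67 * 0.7 + 0.67 * 0.3 ≈ 0.67 overall.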
def _preprocess_text_for_matching(self, text: str) -> str:
"""
预处理文本:在不同类型的字符(如中文和数字/英文)之间插入空格,
以便于 token_sort_ratio 更准确地进行分词和匹配。
"""
if not text:
return ""
        # 1. Insert a space between CJK and non-CJK (digits/letters)
        # e.g. "2024年" -> "2024 年", "ID号码123" -> "ID号码 123"
text = re.sub(r'([\u4e00-\u9fa5])([a-zA-Z0-9])', r'\1 \2', text)
text = re.sub(r'([a-zA-Z0-9])([\u4e00-\u9fa5])', r'\1 \2', text)
return text
def _calculate_subsequence_score(self, target: str, source: str) -> float:
"""
计算子序列匹配得分 (解决 OCR 噪音插入问题)
例如: Target="12345", Source="12(date)34(time)5" -> Score close to 100
"""
        # 1. Keep only alphanumerics; ignore symbol noise
t_clean = "".join(c for c in target if c.isalnum())
s_clean = "".join(c for c in source if c.isalnum())
if not t_clean or not s_clean:
return 0.0
        # 2. Greedy subsequence matching
t_idx, s_idx = 0, 0
matches = 0
while t_idx < len(t_clean) and s_idx < len(s_clean):
if t_clean[t_idx] == s_clean[s_idx]:
matches += 1
t_idx += 1
s_idx += 1
else:
                # Skip noise characters in the source
s_idx += 1
        # 3. Score
match_rate = matches / len(t_clean)
        # Bail out early if the match rate is too low
if match_rate < 0.8:
return match_rate * 100
        # 4. Noise penalty (guards against e.g. Target="1", Source="123456789")
        # Compute the noise length
noise_len = len(s_clean) - matches
        # Some noise is expected (inserted dates/times typically span 30%-50% of
        # the string); penalize only once noise exceeds 60% of the target length
penalty = 0
if noise_len > len(t_clean) * 0.6:
excess_noise = noise_len - (len(t_clean) * 0.6)
            penalty = excess_noise * 0.5  # deduct 0.5 points per extra noise character
            penalty = min(penalty, 20)  # cap the deduction at 20 points
final_score = (match_rate * 100) - penalty
return max(0, final_score)
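    # Worked trace (hypothetical): target="12345", source="12ab34cd5" gives
    # t_clean="12345", matches=5, match_rate=1.0 and noise_len=4; the allowance
    # is 0.6 * 5 = 3, so penalty = (4 - 3) * 0.5 = 0.5 and the score is 99.5.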
def _match_cell_sequential(self, cell_text: str,
boxes: List[Dict],
col_boundaries: List[Tuple[int, int]],
start_idx: int) -> Optional[Dict]:
"""
🎯 顺序匹配单元格:从指定位置开始,逐步合并 boxes 直到匹配
"""
cell_text_normalized = self.text_matcher.normalize_text(cell_text)
cell_text_processed = self._preprocess_text_for_matching(cell_text)
if len(cell_text_normalized) < 1:
return None
        # 🔑 Find the first unused box
first_unused_idx = start_idx
while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
first_unused_idx += 1
if first_unused_idx >= len(boxes):
return None
        # 🔑 Strategy 1: exact match against a single box
for box in boxes[first_unused_idx:]:
box_text = self.text_matcher.normalize_text(box['text'])
if cell_text_normalized == box_text:
return self._build_match_result([box], box['text'], 100.0, boxes.index(box))
        # 🔑 Strategy 2: match against merged boxes
        unused_boxes = [b for b in boxes if not b.get('used')]
        # Merge the boxes that fall within the same column
merged_bboxes = []
for col_idx in range(len(col_boundaries)):
combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
if len(combo_boxes) > 0:
sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
                # 🎯 Improvement: join with spaces so token_sort_ratio can match reordered tokens
merged_text = ' '.join([b['text'] for b in sorted_combo])
merged_bboxes.append({
'text': merged_text,
'sorted_combo': sorted_combo
})
for box in merged_bboxes:
            # 1. Exact match
merged_text_normalized = self.text_matcher.normalize_text(box['text'])
if cell_text_normalized == merged_text_normalized:
last_sort_idx = boxes.index(box['sorted_combo'][-1])
return self._build_match_result(box['sorted_combo'], box['text'], 100.0, last_sort_idx)
            # 2. Substring match
is_substring = (cell_text_normalized in merged_text_normalized or
merged_text_normalized in cell_text_normalized)
            # 3. Fuzzy matching
            # 🎯 Improvement: run token_sort_ratio on the preprocessed texts
box_text_processed = self._preprocess_text_for_matching(box['text'])
            # token_sort_ratio: tokenizes and sorts before comparing, handling OCR output ordered differently from the HTML
token_sort_sim = fuzz.token_sort_ratio(cell_text_processed, box_text_processed)
            # partial_ratio: fuzzy substring matching, tolerant of OCR recognition errors
partial_sim = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
            # 🎯 New: token_set_ratio (set-based matching)
            # Handles targets that are interleaved with noise in the OCR text,
            # e.g. Target="A B", OCR="A noise B" -> token_set_ratio is high
token_set_sim = fuzz.token_set_ratio(cell_text_processed, box_text_processed)
            # 🎯 Strategy 4: reconstruction match - handles IDs interrupted by noise
            # Idea: collect every OCR token that is a substring of the target, concatenate, and re-compare
reconstruct_sim = 0.0
            if len(cell_text_normalized) > 10:  # long texts only; avoids false positives on short ones
                # Tokenize the preprocessed text (CJK/digit boundaries already spaced)
box_tokens = box_text_processed.split()
                # Keep only the tokens that are substrings of the target text
valid_tokens = []
for token in box_tokens:
                    # Ignore very short tokens (unless the target is also short) to avoid spurious matches like "1"
if len(token) < 2 and len(cell_text_normalized) > 5:
continue
if token in cell_text_normalized:
valid_tokens.append(token)
if valid_tokens:
                    # Concatenate back into a single string
reconstructed_text = "".join(valid_tokens)
reconstruct_sim = fuzz.ratio(cell_text_normalized, reconstructed_text)
if reconstruct_sim > 90:
print(f" 🧩 重构匹配生效: '{reconstructed_text}' (sim={reconstruct_sim})")
            # 🎯 Strategy 5: subsequence match - handles noise glued into the value
            # e.g. '1544...1050' + '2024-08-10' + '0433...'
subseq_sim = 0.0
            if len(cell_text_normalized) > 8:  # longer texts only
subseq_sim = self._calculate_subsequence_score(cell_text_normalized, merged_text_normalized)
            # 🛡️ Key fix: guard on length and content type
if subseq_sim > 80:
len_cell = len(cell_text_normalized)
len_box = len(merged_text_normalized)
                # 1. The box text is much longer than the cell text
                if len_box > len_cell * 1.5:
                    # 2. and the cell is numeric/date/time-like (easily mis-matched inside long IDs)
                    # Matches pure digits and date/time-style strings
                    if re.match(r'^[\d\-\:\.\s]+$', cell_text_normalized):
print(f" ⚠️ 拒绝子序列匹配: 长度差异大且为数字类型 (sim={subseq_sim})")
subseq_sim = 0.0
if subseq_sim > 90:
print(f" 🔗 子序列匹配生效: '{cell_text[:10]}...' (sim={subseq_sim:.1f})")
            # Overall score: the max of the five signals
similarity = max(token_sort_sim, partial_sim, token_set_sim, reconstruct_sim, subseq_sim)
            # 🎯 Substring bonus
if is_substring:
similarity = min(100, similarity + 10)
            # 🎯 Length penalty: if the box holds far more text than the cell (e.g. it
            # swallowed the next cell), deduct points. token_set_ratio is insensitive to
            # length, so a strict length check is needed here; only apply it when the
            # similarity is already high, to avoid false negatives.
if similarity > 80:
len_cell = len(cell_text_normalized)
len_box = len(merged_text_normalized)
                # A high score driven by token_set_sim means the OCR text contains a
                # lot of noise; make sure that noise is not the next cell's content
                if len_box > len_cell * 2.0 + 10:  # lenient, since token_set exists to absorb noise
                    similarity -= 10  # small deduction: everything matched, but with too much noise
            if similarity >= self.text_matcher.similarity_threshold:
                print(f"    ✓ Match: '{cell_text[:15]}' vs '{box['text'][:15]}' (similarity: {similarity})")
                # Fuzzy match: set last_index just before the first unused box.
                # Initialize to start_idx so it is defined even if every box is used.
                last_idx = start_idx
                for b in boxes:
                    if not b.get('used'):
                        last_idx = max(boxes.index(b) - 1, 0)
                        break
                return self._build_match_result(box['sorted_combo'], box['text'], similarity, max(start_idx, last_idx))
print(f" ✗ 匹配失败: '{cell_text[:15]}'")
return None
def _build_match_result(self, boxes: List[Dict], text: str,
score: float, last_index: int) -> Dict:
"""构建匹配结果(使用原始坐标)"""
# 🔑 关键修复:使用 original_bbox(如果存在)
def get_original_bbox(box: Dict) -> List[int]:
return box.get('original_bbox', box['bbox'])
original_bboxes = [get_original_bbox(b) for b in boxes]
merged_bbox = [
min(b[0] for b in original_bboxes),
min(b[1] for b in original_bboxes),
max(b[2] for b in original_bboxes),
max(b[3] for b in original_bboxes)
]
return {
            'bbox': merged_bbox,  # ✅ original (pre-skew-correction) coordinates
'text': text,
'score': score,
'paddle_indices': [b['paddle_bbox_index'] for b in boxes],
'used_boxes': boxes,
'last_used_index': last_index
}
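if __name__ == "__main__":
    # Minimal smoke test (illustrative). _StubTextMatcher is a hypothetical
    # stand-in for the real TextMatcher so the geometric helpers can be
    # exercised in isolation; it only provides the two members this class uses.
    class _StubTextMatcher:
        similarity_threshold = 80
        def normalize_text(self, text: str) -> str:
            return "".join(text.split()).lower()
    matcher = TableCellMatcher(_StubTextMatcher())
    demo_boxes = [
        {'text': 'Name', 'bbox': [10, 10, 60, 30], 'paddle_bbox_index': 0},
        {'text': 'Age', 'bbox': [200, 12, 240, 32], 'paddle_bbox_index': 1},
        {'text': 'Alice', 'bbox': [12, 50, 70, 70], 'paddle_bbox_index': 2},
        {'text': '30', 'bbox': [205, 52, 235, 72], 'paddle_bbox_index': 3},
    ]
    rows = matcher._group_paddle_boxes_by_rows(demo_boxes, auto_correct_skew=False)
    print(f"rows: {len(rows)}")  # expected: 2
    cols = matcher._cluster_boxes_by_x(rows[0]['boxes'])
    print(f"column clusters in first row: {len(cols)}")  # expected: 2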