@@ -873,40 +873,89 @@ class TableCellMatcher:
             return coverage * 0.7
 
         # 2. Sequence similarity
-        from fuzzywuzzy import fuzz
         # Use token_sort_ratio to tolerate a certain amount of reordering
         seq_score = fuzz.token_sort_ratio(text1, text2) / 100.0
 
         return (coverage * 0.7) + (seq_score * 0.3)
 
+    def _preprocess_text_for_matching(self, text: str) -> str:
+        """
+        Preprocess text: insert spaces between different types of characters (e.g. Chinese vs. digits/English)
+        so that token_sort_ratio can tokenize and match more accurately.
+        """
+        if not text:
+            return ""
+        import re
+        # 1. Insert a space between Chinese and non-Chinese (digit/letter) characters
+        # e.g.: "2024年" -> "2024 年", "ID号码123" -> "ID号码 123"
+        text = re.sub(r'([\u4e00-\u9fa5])([a-zA-Z0-9])', r'\1 \2', text)
+        text = re.sub(r'([a-zA-Z0-9])([\u4e00-\u9fa5])', r'\1 \2', text)
+        return text
+
+    def _calculate_subsequence_score(self, target: str, source: str) -> float:
+        """
+        Compute the subsequence match score (handles OCR noise insertions).
+        e.g.: Target="12345", Source="12(date)34(time)5" -> score close to 100
+        """
+        # 1. Keep only letters and digits; ignore symbol noise
+        t_clean = "".join(c for c in target if c.isalnum())
+        s_clean = "".join(c for c in source if c.isalnum())
+
+        if not t_clean or not s_clean:
+            return 0.0
+
+        # 2. Greedy subsequence matching
+        t_idx, s_idx = 0, 0
+        matches = 0
+
+        while t_idx < len(t_clean) and s_idx < len(s_clean):
+            if t_clean[t_idx] == s_clean[s_idx]:
+                matches += 1
+                t_idx += 1
+                s_idx += 1
+            else:
+                # Skip noise characters in the source
+                s_idx += 1
+
+        # 3. Compute the score
+        match_rate = matches / len(t_clean)
+
+        # If the match rate is too low, return immediately
+        if match_rate < 0.8:
+            return match_rate * 100
+
+        # 4. Noise penalty (prevents false positives like Target="1", Source="123456789")
+        # Compute the noise length
+        noise_len = len(s_clean) - matches
+
+        # Allow a certain share of noise (e.g. inserted dates/times, typically 30%-50% of the total length)
+        # Start deducting once the noise exceeds 60% of the target length
+        penalty = 0
+        if noise_len > len(t_clean) * 0.6:
+            excess_noise = noise_len - (len(t_clean) * 0.6)
+            penalty = excess_noise * 0.5  # deduct 0.5 points per extra noise character
+            penalty = min(penalty, 20)  # cap the deduction at 20 points
+
+        final_score = (match_rate * 100) - penalty
+        return max(0, final_score)
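+        # Worked example (hand-computed, values assumed for illustration):
+        #   target="12345", source="12 2024-08-10 345" -> t_clean="12345", s_clean="1220240810345"
+        #   the greedy pass matches all 5 target chars -> match_rate=1.0; noise_len=8 > 5*0.6=3
+        #   penalty=(8-3)*0.5=2.5 -> final score 97.5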
+
     def _match_cell_sequential(self, cell_text: str,
                                boxes: List[Dict],
                                col_boundaries: List[Tuple[int, int]],
                                start_idx: int) -> Optional[Dict]:
         """
         🎯 Sequentially match a cell: starting from the given position, merge boxes step by step until a match is found
-
-        Strategy:
-        1. Find the first unused box
-        2. Try an exact match against a single box
-        3. If that fails, try merging multiple boxes
-
-        Args:
-            cell_text: HTML cell text
-            boxes: candidate boxes (already sorted by x coordinate)
-            col_boundaries: list of column boundaries
-            start_idx: starting index
-
-        Returns:
-            {'bbox': [x1,y1,x2,y2], 'text': str, 'score': float,
-             'paddle_indices': [idx1, idx2], 'used_boxes': [box1, box2],
-             'last_used_index': int}
         """
         cell_text_normalized = self.text_matcher.normalize_text(cell_text)
+        cell_text_processed = self._preprocess_text_for_matching(cell_text)
 
-        if len(cell_text_normalized) < 2:
+        if len(cell_text_normalized) < 1:
             return None
-
+
         # 🔑 Find the first unused box
         first_unused_idx = start_idx
         while first_unused_idx < len(boxes) and boxes[first_unused_idx].get('used'):
@@ -917,9 +966,6 @@ class TableCellMatcher:
 
         # 🔑 Strategy 1: exact match against a single box
         for box in boxes[first_unused_idx:]:
-            if box.get('used'):
-                continue
-
             box_text = self.text_matcher.normalize_text(box['text'])
 
             if cell_text_normalized == box_text:
@@ -933,7 +979,10 @@ class TableCellMatcher:
             combo_boxes = self._get_boxes_in_column(unused_boxes, col_boundaries, col_idx)
             if len(combo_boxes) > 0:
                 sorted_combo = sorted(combo_boxes, key=lambda b: (b['bbox'][1], b['bbox'][0]))
-                merged_text = ''.join([b['text'] for b in sorted_combo])
+                # 🎯 Improvement: join with spaces so token_sort_ratio can match out-of-order tokens
+                merged_text = ' '.join([b['text'] for b in sorted_combo])
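+                # Illustrative note (assumed boxes "单价" and "元/吨"): ''.join yields the single
+                # unsplittable token "单价元/吨", while ' '.join keeps two tokens token_sort_ratio can align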
                 merged_bboxes.append({
                     'text': merged_text,
                     'sorted_combo': sorted_combo
@@ -951,20 +1000,103 @@ class TableCellMatcher:
                             merged_text_normalized in cell_text_normalized)
 
             # 3. Fuzzy matching
-            similarity = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
+            # 🎯 Improvement: compute token_sort_ratio on the preprocessed text
+            box_text_processed = self._preprocess_text_for_matching(box['text'])
 
+            # token_sort_ratio: tokenizes and sorts before comparing, handling OCR output whose order differs from the HTML
+            token_sort_sim = fuzz.token_sort_ratio(cell_text_processed, box_text_processed)
+
+            # partial_ratio: fuzzy substring matching, handling OCR recognition errors
+            partial_sim = fuzz.partial_ratio(cell_text_normalized, merged_text_normalized)
+
+            # 🎯 New: token_set_ratio (set-based matching)
+            # Specifically handles the target text being split apart by noise in the OCR text
+            # e.g. Target="A B", OCR="A noise B" -> token_set_ratio will be high
+            token_set_sim = fuzz.token_set_ratio(cell_text_processed, box_text_processed)
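+            # Illustrative contrast (assumed inputs): cell="批次号 B202408", OCR box="B202408 批次号 审核"
+            # -> token_set_ratio hits 100 (its intersection-based comparison ignores the extra token),
+            #    token_sort_ratio stays high, while partial_ratio alone would under-score the reordered text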
+
+            # 🎯 Strategy 4: reconstruction match - handles IDs interrupted by noise
+            # Idea: extract every OCR token that is a substring of the target, concatenate them, then compare
+            reconstruct_sim = 0.0
+            if len(cell_text_normalized) > 10:  # long text only, to avoid false positives on short text
+                # Tokenize using the preprocessed text (Chinese/digit boundaries already spaced)
+                box_tokens = box_text_processed.split()
+                # Keep only the tokens that are substrings of the target text
+                valid_tokens = []
+                for token in box_tokens:
+                    # Ignore overly short tokens (unless the target is short too), to prevent mismatches like "1"
+                    if len(token) < 2 and len(cell_text_normalized) > 5:
+                        continue
+                    if token in cell_text_normalized:
+                        valid_tokens.append(token)
+
+                if valid_tokens:
+                    # Concatenate back into the original form
+                    reconstructed_text = "".join(valid_tokens)
+                    reconstruct_sim = fuzz.ratio(cell_text_normalized, reconstructed_text)
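+                    # e.g. (assumed): cell="20240810123456", OCR tokens ["202408", "时", "10123456"]
+                    # -> "时" is dropped as too short, the valid tokens rebuild "20240810123456",
+                    #    so fuzz.ratio against the cell text returns 100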
+                    if reconstruct_sim > 90:
+                        print(f" 🧩 Reconstruction match hit: '{reconstructed_text}' (sim={reconstruct_sim})")
+
+            # 🎯 Strategy 5: subsequence match - handles glued-on noise
+            # Specifically targets scenarios like: '1544...1050' + '2024-08-10' + '0433...'
+            subseq_sim = 0.0
+            if len(cell_text_normalized) > 8:  # only for longer text
+                subseq_sim = self._calculate_subsequence_score(cell_text_normalized, merged_text_normalized)
+                # 🛡️ Key fix: length and type guard
+                if subseq_sim > 80:
+                    len_cell = len(cell_text_normalized)
+                    len_box = len(merged_text_normalized)
+
+                    # 1. The length gap is too large (the box is much longer than the cell)
+                    if len_box > len_cell * 1.5:
+                        # 2. ...and the cell is a numeric/date/time value (easily mismatched inside long IDs)
+                        import re
+                        # Matches pure digits and date/time formats
+                        if re.match(r'^[\d\-\:\.\s]+$', cell_text_normalized):
+                            print(f" ⚠️ Rejecting subsequence match: large length gap and numeric type (sim={subseq_sim})")
+                            subseq_sim = 0.0
+
+                if subseq_sim > 90:
+                    print(f" 🔗 Subsequence match hit: '{cell_text[:10]}...' (sim={subseq_sim:.1f})")
+
+            # Combined score: take the maximum of the five
+            similarity = max(token_sort_sim, partial_sim, token_set_sim, reconstruct_sim, subseq_sim)
+
             # 🎯 Substring match bonus
             if is_substring:
                 similarity = min(100, similarity + 10)
 
+            # 🎯 Length penalty: if the box contains far more than the cell (e.g. it swallowed the next cell), deduct points
+            # Note: token_set_ratio is insensitive to length, so the length must be checked strictly here to avoid false positives
+            # Only check when similarity is already high, to avoid killing good matches
+            if similarity > 80:
+                len_cell = len(cell_text_normalized)
+                len_box = len(merged_text_normalized)
+
+                # If the high score came from token_set_sim, the OCR text contains a lot of noise;
+                # we must make sure that noise is not "the content of the next cell".
+                # A stricter length check, or e.g. a newline check, could be added here.
+                if len_box > len_cell * 2.0 + 10:  # slightly lenient, since token_set exists precisely to handle noise
+                    similarity -= 10  # small deduction: everything was found, but too much noise to be perfect
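+                    # e.g. (assumed): len_cell=10, len_box=35 > 10*2.0+10=30 -> similarity drops by 10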
+
             if similarity >= self.text_matcher.similarity_threshold:
-                print(f" ✓ Match succeeded: '{cell_text[:15]}' vs '{merged_text[:15]}' (similarity: {similarity})")
-                return self._build_match_result(box['sorted_combo'], box['text'], similarity, start_idx)
+                print(f" ✓ Match succeeded: '{cell_text[:15]}' vs '{box['text'][:15]}' (similarity: {similarity})")
+                last_idx = start_idx  # fuzzy match: fall back to start_idx if every box is already used
+                for i, b in enumerate(boxes):
+                    if not b.get('used'):
+                        last_idx = max(i - 1, 0)  # index just before the first unused box
+                        break
+                return self._build_match_result(box['sorted_combo'], box['text'], similarity, max(start_idx, last_idx))
 
         print(f" ✗ Match failed: '{cell_text[:15]}'")
         return None
 
-
     def _build_match_result(self, boxes: List[Dict], text: str,
                             score: float, last_index: int) -> Dict:
         """Build the match result (using original coordinates)"""