table_comparator.py 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849
import re
import sys
from typing import Dict, List, Tuple, Optional
from pathlib import Path

# Add the ocr_platform root directory to the Python path.
# resolve() guarantees an absolute path, avoiding IndexError caused by relative paths.
_file_path = Path(__file__).resolve()
ocr_platform_root = _file_path.parents[1]  # table_comparator.py -> ocr_comparator -> ocr_platform
if str(ocr_platform_root) not in sys.path:
    sys.path.insert(0, str(ocr_platform_root))

# Prefer package-relative imports; fall back to flat imports when the module
# is executed directly (outside the package).
try:
    from .data_type_detector import DataTypeDetector
    from .similarity_calculator import SimilarityCalculator
    from .text_processor import TextProcessor
except ImportError:
    from data_type_detector import DataTypeDetector
    from similarity_calculator import SimilarityCalculator
    from text_processor import TextProcessor
class TableComparator:
    """Table data comparison: matches, aligns and diffs OCR-extracted tables."""

    def __init__(self):
        # Project-local collaborators for type detection, fuzzy text
        # similarity, and text normalization.
        self.detector = DataTypeDetector()
        self.calculator = SimilarityCalculator()
        self.text_processor = TextProcessor()
        # Header cells must score >= 90% similar to be considered equal.
        self.header_similarity_threshold = 90
        # Free-text cells must score >= 95% similar to be considered equal.
        self.content_similarity_threshold = 95
        # NOTE(review): not referenced anywhere in this file — presumably
        # consumed by a collaborator or subclass; confirm before removing.
        self.max_paragraph_window = 6
    def find_matching_tables(self, tables1: List[List[List[str]]],
                             tables2: List[List[List[str]]]) -> List[Tuple[int, int, float]]:
        """
        Greedily match tables between two files.

        For each non-empty table in *tables1*, pick the most similar
        non-empty table in *tables2* (scored by _calculate_table_similarity)
        and keep the pair when the score exceeds 50%.

        Returns:
            List[Tuple[int, int, float]]: (table1_index, table2_index, similarity_score)

        NOTE(review): a table in *tables2* can be matched by several tables
        from *tables1* (no one-to-one constraint) — confirm this is intended.
        """
        matches = []
        for i, table1 in enumerate(tables1):
            if not table1:
                continue
            best_match = None
            best_score = 0
            for j, table2 in enumerate(tables2):
                if not table2:
                    continue
                # Composite similarity score in [0, 100].
                score = self._calculate_table_similarity(table1, table2)
                if score > best_score:
                    best_score = score
                    best_match = j
            if best_match is not None and best_score > 50:  # require at least 50% similarity
                matches.append((i, best_match, best_score))
                print(f" 📊 表格匹配: 文件1表格{i+1} ↔ 文件2表格{best_match+1} (相似度: {best_score:.1f}%)")
        return matches
  53. def _get_max_columns(self, table: List[List[str]]) -> int:
  54. """获取表格的最大列数"""
  55. if not table:
  56. return 0
  57. return max(len(row) for row in table)
    def _calculate_table_similarity(self, table1: List[List[str]],
                                    table2: List[List[str]]) -> float:
        """
        Score how similar two tables are, in [0, 100].

        Weighted blend of four signals:
        row count 15%, column count 15%, header text 50%, content features 20%.
        """
        if not table1 or not table2:
            return 0.0
        # 1. Row-count similarity (weight: 15%).
        row_count1 = len(table1)
        row_count2 = len(table2)
        row_similarity = 100 * (1 - abs(row_count1 - row_count2) / max(row_count1, row_count2))
        # 2. Column-count similarity (weight: 15%) — based on the widest row.
        col_count1 = self._get_max_columns(table1)
        col_count2 = self._get_max_columns(table2)
        max_cols = max(col_count1, col_count2)
        min_cols = min(col_count1, col_count2)
        if max_cols == 0:
            col_similarity = 0
        else:
            # Small column-count gaps (1-2 columns, common OCR noise) still
            # score high; larger gaps fall back to a proportional score.
            col_diff = abs(col_count1 - col_count2)
            if col_diff == 0:
                col_similarity = 100
            elif col_diff <= 2:
                # 1-2 columns apart: 90 / 80 points.
                col_similarity = 100 - (col_diff * 10)
            else:
                # Larger gap: proportional to the narrower table.
                col_similarity = 100 * (min_cols / max_cols)
        print(f" 行数对比: {row_count1} vs {row_count2}, 相似度: {row_similarity:.1f}%")
        print(f" 列数对比: {col_count1} vs {col_count2}, 相似度: {col_similarity:.1f}%")
        # 3. Header similarity (weight: 50%) — locate each header row first.
        header_row_idx1 = self.detect_table_header_row(table1)
        header_row_idx2 = self.detect_table_header_row(table2)
        print(f" 表头位置: 文件1第{header_row_idx1+1}行, 文件2第{header_row_idx2+1}行")
        header_similarity = 0
        if header_row_idx1 < len(table1) and header_row_idx2 < len(table2):
            header1 = table1[header_row_idx1]
            header2 = table2[header_row_idx2]
            if header1 and header2:
                # Fuzzy, order-insensitive header matching.
                header_similarity = self._calculate_header_similarity_smart(header1, header2)
        print(f" 表头相似度: {header_similarity:.1f}%")
        # 4. Content-feature similarity (weight: 20%).
        content_similarity = self._calculate_content_features_similarity(table1, table2)
        print(f" 内容特征相似度: {content_similarity:.1f}%")
        # Weighted total: the header signal dominates.
        total_similarity = (
            row_similarity * 0.15 +      # row count 15%
            col_similarity * 0.15 +      # column count 15%
            header_similarity * 0.50 +   # header 50% (most important)
            content_similarity * 0.20    # content 20%
        )
        return total_similarity
    def _calculate_header_similarity_smart(self, header1: List[str],
                                           header2: List[str]) -> float:
        """
        Smart header-row similarity, in [0, 100].

        Designed for OCR output where:
        1. column counts differ while the header text is similar,
        2. multi-line headers may have been merged into one cell by PaddleOCR,
        3. header order may differ between the two files.

        Blends three signals: exact matches 40%, fuzzy matches 40%,
        keyword-category ("semantic") matches 20%.
        """
        if not header1 or not header2:
            return 0.0
        # Normalize each header cell (strip brackets/whitespace, lowercase).
        norm_headers1 = [self.normalize_header_text(h) for h in header1]
        norm_headers2 = [self.normalize_header_text(h) for h in header2]
        # Method 1: exact matching (highest priority).
        common_headers = set(norm_headers1) & set(norm_headers2)
        max_len = max(len(norm_headers1), len(norm_headers2))
        if max_len == 0:
            return 0.0
        exact_match_ratio = len(common_headers) / max_len
        # Method 2: fuzzy matching (handles differing column counts).
        fuzzy_matches = 0
        # Use the shorter header list as the base.
        if len(norm_headers1) <= len(norm_headers2):
            base_headers = norm_headers1
            compare_headers = norm_headers2
        else:
            base_headers = norm_headers2
            compare_headers = norm_headers1
        for base_h in base_headers:
            best_similarity = 0
            for comp_h in compare_headers:
                similarity = self.calculator.calculate_text_similarity(base_h, comp_h)
                if similarity > best_similarity:
                    best_similarity = similarity
                    if best_similarity == 100:
                        break  # perfect match — stop scanning
            # Above 70% similarity counts as a fuzzy match.
            if best_similarity > 70:
                fuzzy_matches += 1
        fuzzy_match_ratio = fuzzy_matches / max_len if max_len > 0 else 0
        # Method 3: keyword categories (recognize common financial headers).
        key_headers = {
            'date': ['日期', 'date', '时间', 'time'],
            'type': ['类型', 'type', '业务', 'business'],
            'number': ['号', 'no', '编号', 'id', '票据', 'bill'],
            'description': ['摘要', 'description', '说明', 'remark'],
            'amount': ['金额', 'amount', '借方', 'debit', '贷方', 'credit'],
            'balance': ['余额', 'balance'],
            'counterparty': ['对手', 'counterparty', '账户', 'account', '户名', 'name']
        }
        def categorize_header(h: str) -> set:
            # Map one header cell to the set of semantic categories it mentions.
            categories = set()
            h_lower = h.lower()
            for category, keywords in key_headers.items():
                for keyword in keywords:
                    if keyword in h_lower:
                        categories.add(category)
            return categories
        categories1 = set()
        for h in norm_headers1:
            categories1.update(categorize_header(h))
        categories2 = set()
        for h in norm_headers2:
            categories2.update(categorize_header(h))
        common_categories = categories1 & categories2
        all_categories = categories1 | categories2
        category_match_ratio = len(common_categories) / len(all_categories) if all_categories else 0
        # Weighted blend of the three methods.
        final_similarity = (
            exact_match_ratio * 0.4 +      # exact match 40%
            fuzzy_match_ratio * 0.4 +      # fuzzy match 40%
            category_match_ratio * 0.2     # semantic match 20%
        ) * 100
        print(f" 精确匹配: {exact_match_ratio:.1%}, 模糊匹配: {fuzzy_match_ratio:.1%}, 语义匹配: {category_match_ratio:.1%}")
        return final_similarity
    def _calculate_content_features_similarity(self, table1: List[List[str]],
                                               table2: List[List[str]]) -> float:
        """Compare cell-level statistics of the two tables and return the
        mean per-feature similarity in [0, 100]."""
        # Ratios of numeric / date / empty cells, per table.
        features1 = self._extract_table_features(table1)
        features2 = self._extract_table_features(table2)
        # Each ratio is in [0, 1], so |diff| maps directly to a 0-100 score.
        similarity_scores = []
        for key in ['numeric_ratio', 'date_ratio', 'empty_ratio']:
            if key in features1 and key in features2:
                diff = abs(features1[key] - features2[key])
                similarity_scores.append(100 * (1 - diff))
        return sum(similarity_scores) / len(similarity_scores) if similarity_scores else 0
    def _extract_table_features(self, table: List[List[str]]) -> Dict:
        """Return cell statistics for *table*: ratios of numeric, date and
        empty cells plus the total cell count.  A single cell may count as
        both numeric and date (the two checks are independent)."""
        total_cells = 0
        numeric_cells = 0
        date_cells = 0
        empty_cells = 0
        for row in table:
            for cell in row:
                total_cells += 1
                if not cell or cell.strip() == '':
                    empty_cells += 1
                    continue  # empty cells are not type-checked
                if self.detector.is_numeric(cell):
                    numeric_cells += 1
                if self.detector.extract_datetime(cell):
                    date_cells += 1
        return {
            'numeric_ratio': numeric_cells / total_cells if total_cells > 0 else 0,
            'date_ratio': date_cells / total_cells if total_cells > 0 else 0,
            'empty_ratio': empty_cells / total_cells if total_cells > 0 else 0,
            'total_cells': total_cells
        }
  221. def normalize_header_text(self, text: str) -> str:
  222. """标准化表头文本"""
  223. # 移除括号内容
  224. text = re.sub(r'[((].*?[))]', '', text)
  225. # 移除空格
  226. text = re.sub(r'\s+', '', text)
  227. # 只保留字母、数字和中文
  228. text = re.sub(r'[^\w\u4e00-\u9fff]', '', text)
  229. return text.lower().strip()
    def detect_table_header_row(self, table: List[List[str]]) -> int:
        """
        Locate the header row of *table* and return its 0-based index.

        Strategy:
        1. scan the first 10 rows, scoring each by header-keyword density
           and column width,
        2. boost a row whose following row looks like data (+0.2) or like a
           category label such as "流动资产:" (+0.1),
        3. fall back to row 0 when no candidate scores at least 0.3.
        """
        if not table:
            return 0
        # Keywords commonly found in financial table headers (zh + en),
        # including balance-sheet period markers.
        header_keywords = [
            '日期', 'date', '时间', 'time',
            '类型', 'type', '业务', 'business',
            '号', 'no', '编号', 'id', '票据', 'bill',
            '摘要', 'description', '说明', 'remark',
            '金额', 'amount', '借方', 'debit', '贷方', 'credit',
            '余额', 'balance',
            '对手', 'counterparty', '账户', 'account', '户名', 'name',
            '期末', 'period', '期初'
        ]
        best_header_row = 0
        best_score = 0
        for row_idx, row in enumerate(table[:10]):  # headers live near the top
            if not row:
                continue
            # Count cells that contain at least one header keyword.
            keyword_count = 0
            non_empty_cells = 0
            for cell in row:
                cell_text = str(cell).strip()
                if cell_text:
                    non_empty_cells += 1
                    cell_lower = cell_text.lower()
                    for keyword in header_keywords:
                        if keyword in cell_lower:
                            keyword_count += 1
                            break  # one hit per cell is enough
            if non_empty_cells < 3:
                continue  # too sparse to be a header row
            keyword_ratio = keyword_count / non_empty_cells if non_empty_cells > 0 else 0
            # Wider rows are more header-like; bonus caps at 5 columns.
            column_bonus = min(non_empty_cells / 5, 1.0)
            score = keyword_ratio * 0.7 + column_bonus * 0.3
            # Look ahead one row: a data row strongly confirms a header,
            # a category row (e.g. "流动资产:") weakly confirms one.
            if row_idx + 1 < len(table):
                next_row = table[row_idx + 1]
                if self._is_data_row(next_row):
                    score += 0.2
                elif self._is_category_row(next_row):
                    score += 0.1
            if score > best_score:
                best_score = score
                best_header_row = row_idx
        if best_score < 0.3:
            print(f" ⚠️ 未检测到明确表头,默认使用第1行 (得分: {best_score:.2f})")
            return 0
        print(f" 📍 检测到表头在第 {best_header_row + 1} 行 (得分: {best_score:.2f})")
        return best_header_row
  291. def _is_category_row(self, row: List[str]) -> bool:
  292. """
  293. ✅ 新增:判断是否为分类行(如"流动资产:")
  294. """
  295. if not row:
  296. return False
  297. category_patterns = [
  298. # r'流动[资产负债]',
  299. # r'非流动[资产负债]',
  300. r'.*:$', # 以冒号结尾
  301. ]
  302. for cell in row:
  303. cell_text = str(cell).strip()
  304. if not cell_text:
  305. continue
  306. for pattern in category_patterns:
  307. if re.search(pattern, cell_text):
  308. return True
  309. return False
  310. def _is_data_row(self, row: List[str]) -> bool:
  311. """
  312. 判断是否为数据行(改进版)
  313. ✅ "-" 符号表示金额为0或空,应该被认为是有效的数据单元格
  314. """
  315. if not row:
  316. return False
  317. data_pattern_count = 0
  318. non_empty_count = 0
  319. for cell in row:
  320. cell_text = str(cell).strip()
  321. if not cell_text:
  322. continue
  323. non_empty_count += 1
  324. # ✅ "-" 符号也是有效的数据(表示0或空)
  325. if cell_text == '-' or cell_text == '—' or cell_text == '--':
  326. data_pattern_count += 1
  327. continue
  328. # 包含数字
  329. if re.search(r'\d', cell_text):
  330. data_pattern_count += 1
  331. # 包含日期格式
  332. if re.search(r'\d{4}[-/年]\d{1,2}[-/月]\d{1,2}', cell_text):
  333. data_pattern_count += 1
  334. # 包含金额格式
  335. if re.search(r'-?\d+[,,]?\d*\.?\d+', cell_text):
  336. data_pattern_count += 1
  337. if non_empty_count == 0:
  338. return False
  339. # 至少30%的单元格包含数据特征
  340. return data_pattern_count / non_empty_count >= 0.3
    def compare_table_headers(self, headers1: List[str], headers2: List[str]) -> Dict:
        """
        Compare two header rows column by column.

        Returns a dict with:
            match             – True when the column counts match and every
                                column pair clears header_similarity_threshold,
            differences       – diff records (a single 'critical' record when
                                the column counts differ),
            column_mapping    – i -> i for columns considered equal,
            similarity_scores – per-column similarity details.
        """
        result = {
            'match': True,
            'differences': [],
            'column_mapping': {},
            'similarity_scores': []
        }
        # A column-count mismatch is fatal — report and bail out immediately.
        if len(headers1) != len(headers2):
            result['match'] = False
            result['differences'].append({
                'type': 'table_header_critical',
                'description': f'表头列数不一致: {len(headers1)} vs {len(headers2)}',
                'severity': 'critical'
            })
            return result
        for i, (h1, h2) in enumerate(zip(headers1, headers2)):
            # Compare the normalized forms, ignoring brackets/whitespace/case.
            norm_h1 = self.normalize_header_text(h1)
            norm_h2 = self.normalize_header_text(h2)
            similarity = self.calculator.calculate_text_similarity(norm_h1, norm_h2)
            result['similarity_scores'].append({
                'column_index': i,
                'header1': h1,
                'header2': h2,
                'similarity': similarity
            })
            if similarity < self.header_similarity_threshold:
                result['match'] = False
                result['differences'].append({
                    'type': 'table_header_mismatch',
                    'column_index': i,
                    'header1': h1,
                    'header2': h2,
                    'similarity': similarity,
                    'description': f'第{i+1}列表头不匹配: "{h1}" vs "{h2}" (相似度: {similarity:.1f}%)',
                    # Very dissimilar headers (<50%) are only 'medium' —
                    # they are likely different columns, not OCR noise.
                    'severity': 'medium' if similarity < 50 else 'high'
                })
            else:
                result['column_mapping'][i] = i
        return result
    def compare_cell_value(self, value1: str, value2: str, column_type: str,
                           column_name: str = '') -> Dict:
        """
        Compare one pair of cells according to the column's type.

        Args:
            value1, value2: raw cell text from file 1 / file 2.
            column_type: 'text_number', 'numeric', 'datetime', or anything
                         else (treated as free text).
            column_name: accepted for caller context; not read here.

        Returns:
            {'match': bool, 'difference': dict | None}
        """
        result = {
            'match': True,
            'difference': None
        }
        # Normalize both sides before comparing.
        v1 = self.text_processor.normalize_text(value1)
        v2 = self.text_processor.normalize_text(value2)
        if v1 == v2:
            return result
        if column_type == 'text_number':
            # Text-like numbers (IDs, account numbers): compare after
            # stripping formatting.  A formatting-only diff is low severity.
            norm_v1 = self.detector.normalize_text_number(v1)
            norm_v2 = self.detector.normalize_text_number(v2)
            if norm_v1 == norm_v2:
                result['match'] = False
                result['difference'] = {
                    'type': 'table_text',
                    'value1': value1,
                    'value2': value2,
                    'description': f'文本型数字格式差异: "{value1}" vs "{value2}" (内容相同,空格不同)',
                    'severity': 'low'
                }
            else:
                result['match'] = False
                result['difference'] = {
                    'type': 'table_text',
                    'value1': value1,
                    'value2': value2,
                    'description': f'文本型数字不一致: {value1} vs {value2}',
                    'severity': 'high'
                }
            return result
        if column_type == 'numeric':
            if self.detector.is_numeric(v1) and self.detector.is_numeric(v2):
                num1 = self.detector.parse_number(v1)
                num2 = self.detector.parse_number(v2)
                # Tolerate sub-cent rounding noise.
                if abs(num1 - num2) > 0.01:
                    result['match'] = False
                    result['difference'] = {
                        'type': 'table_amount',
                        'value1': value1,
                        'value2': value2,
                        'diff_amount': abs(num1 - num2),
                        'description': f'金额不一致: {value1} vs {value2}',
                        'severity': 'high'  # amount mismatch = high
                    }
            else:
                # At least one side is not parseable as a number.
                result['match'] = False
                result['difference'] = {
                    'type': 'table_text',
                    'value1': value1,
                    'value2': value2,
                    'description': f'长数字字符串不一致: {value1} vs {value2}',
                    'severity': 'medium'  # digit-string mismatch = medium
                }
        elif column_type == 'datetime':
            # Compare the extracted datetime values, not the raw text.
            datetime1 = self.detector.extract_datetime(v1)
            datetime2 = self.detector.extract_datetime(v2)
            if datetime1 != datetime2:
                result['match'] = False
                result['difference'] = {
                    'type': 'table_datetime',
                    'value1': value1,
                    'value2': value2,
                    'description': f'日期时间不一致: {value1} vs {value2}',
                    'severity': 'medium'  # datetime mismatch = medium
                }
        else:
            # Free text: fuzzy comparison against content_similarity_threshold.
            similarity = self.calculator.calculate_text_similarity(v1, v2)
            if similarity < self.content_similarity_threshold:
                result['match'] = False
                result['difference'] = {
                    'type': 'table_text',
                    'value1': value1,
                    'value2': value2,
                    'similarity': similarity,
                    'description': f'文本不一致: {value1} vs {value2} (相似度: {similarity:.1f}%)',
                    'severity': 'low' if similarity > 80 else 'medium'  # scale by similarity
                }
        return result
    def compare_tables(self, table1: List[List[str]], table2: List[List[str]]) -> List[Dict]:
        """
        Standard positional table comparison (no row/column alignment).

        Numeric cell pairs are compared by parsed value (tolerance 0.001);
        all other pairs by exact text.  Cells containing the image-skip
        marker are ignored.  Missing rows/cells compare as empty strings.
        """
        differences = []
        max_rows = max(len(table1), len(table2))
        for i in range(max_rows):
            row1 = table1[i] if i < len(table1) else []
            row2 = table2[i] if i < len(table2) else []
            max_cols = max(len(row1), len(row2))
            for j in range(max_cols):
                cell1 = row1[j] if j < len(row1) else ""
                cell2 = row2[j] if j < len(row2) else ""
                # Skip cells flagged as image content.
                if "[图片内容-忽略]" in cell1 or "[图片内容-忽略]" in cell2:
                    continue
                if cell1 != cell2:
                    if self.detector.is_numeric(cell1) and self.detector.is_numeric(cell2):
                        num1 = self.detector.parse_number(cell1)
                        num2 = self.detector.parse_number(cell2)
                        # Ignore sub-0.001 float noise.
                        if abs(num1 - num2) > 0.001:
                            differences.append({
                                'type': 'table_amount',
                                'position': f'行{i+1}列{j+1}',
                                'file1_value': cell1,
                                'file2_value': cell2,
                                'description': f'金额不一致: {cell1} vs {cell2}',
                                'severity': 'high',  # amount mismatch = high
                                'row_index': i,
                                'col_index': j
                            })
                    else:
                        differences.append({
                            'type': 'table_text',
                            'position': f'行{i+1}列{j+1}',
                            'file1_value': cell1,
                            'file2_value': cell2,
                            'description': f'文本不一致: {cell1} vs {cell2}',
                            'severity': 'medium',  # text mismatch = medium
                            'row_index': i,
                            'col_index': j
                        })
        return differences
    def compare_table_flow_list(self, table1: List[List[str]], table2: List[List[str]]) -> List[Dict]:
        """
        Transaction-list ("flow") table comparison.

        Pipeline: detect header rows -> diff rows above the headers -> diff
        the header rows -> infer and reconcile per-column types -> diff the
        data rows.  Returns a flat list of difference records; a 'critical'
        header diff aborts the remaining steps.
        """
        differences = []
        if not table1 or not table2:
            return [{
                'type': 'table_empty',
                'description': '表格为空',
                'severity': 'critical'
            }]
        print(f"\n📋 开始流水表格对比...")
        # Locate each table's header row.
        header_row_idx1 = self.detect_table_header_row(table1)
        header_row_idx2 = self.detect_table_header_row(table2)
        if header_row_idx1 != header_row_idx2:
            differences.append({
                'type': 'table_header_position',
                'position': '表头位置',
                'file1_value': f'第{header_row_idx1 + 1}行',
                'file2_value': f'第{header_row_idx2 + 1}行',
                'description': f'表头位置不一致: 文件1在第{header_row_idx1 + 1}行,文件2在第{header_row_idx2 + 1}行',
                'severity': 'high'
            })
        # Compare any rows that precede the headers (titles, meta rows).
        if header_row_idx1 > 0 or header_row_idx2 > 0:
            print(f"\n📝 对比表头前的内容...")
            pre_header_table1 = table1[:header_row_idx1] if header_row_idx1 > 0 else []
            pre_header_table2 = table2[:header_row_idx2] if header_row_idx2 > 0 else []
            if pre_header_table1 or pre_header_table2:
                pre_header_diffs = self.compare_tables(pre_header_table1, pre_header_table2)
                # Re-label pre-header diffs and cap their severity at medium.
                for diff in pre_header_diffs:
                    diff['type'] = 'table_pre_header'
                    diff['position'] = f"表头前{diff['position']}"
                    diff['severity'] = 'medium'
                differences.extend(pre_header_diffs)
        # Compare the header rows themselves.
        headers1 = table1[header_row_idx1]
        headers2 = table2[header_row_idx2]
        print(f"\n📋 对比表头...")
        header_result = self.compare_table_headers(headers1, headers2)
        if not header_result['match']:
            print(f"\n⚠️ 表头文字存在差异")
            for diff in header_result['differences']:
                differences.append({
                    'type': diff.get('type', 'table_header_mismatch'),
                    'position': '表头',
                    'file1_value': diff.get('header1', ''),
                    'file2_value': diff.get('header2', ''),
                    'description': diff['description'],
                    'severity': diff.get('severity', 'high'),
                })
                # A critical header diff (column-count mismatch) stops here.
                if diff.get('severity') == 'critical':
                    return differences
        # Infer per-column data types from each table's data rows.
        column_types1 = self._detect_column_types(table1, header_row_idx1, headers1)
        column_types2 = self._detect_column_types(table2, header_row_idx2, headers2)
        # Record columns whose inferred types disagree.
        mismatched_columns = self._check_column_type_mismatch(
            column_types1, column_types2, headers1, headers2, differences
        )
        # Reconcile the two type lists into a single list for cell diffing.
        column_types = self._merge_column_types(column_types1, column_types2, mismatched_columns)
        # Row-by-row data comparison.
        data_diffs = self._compare_data_rows(
            table1, table2, header_row_idx1, header_row_idx2,
            headers1, column_types, mismatched_columns, header_result['match']
        )
        differences.extend(data_diffs)
        print(f"\n✅ 流水表格对比完成,发现 {len(differences)} 个差异")
        return differences
    def _detect_column_types(self, table: List[List[str]], header_row_idx: int,
                             headers: List[str]) -> List[str]:
        """Infer a type for every header column from the data rows below the
        header, delegating to DataTypeDetector.detect_column_type."""
        column_types = []
        for col_idx in range(len(headers)):
            # Collect this column's values, skipping rows too short to have it.
            col_values = [
                row[col_idx]
                for row in table[header_row_idx + 1:]
                if col_idx < len(row)
            ]
            col_type = self.detector.detect_column_type(col_values)
            column_types.append(col_type)
        return column_types
  584. def _check_column_type_mismatch(self, column_types1: List[str], column_types2: List[str],
  585. headers1: List[str], headers2: List[str],
  586. differences: List[Dict]) -> List[int]:
  587. """检查列类型不匹配"""
  588. mismatched_columns = []
  589. for col_idx in range(min(len(column_types1), len(column_types2))):
  590. if column_types1[col_idx] != column_types2[col_idx]:
  591. mismatched_columns.append(col_idx)
  592. differences.append({
  593. 'type': 'table_column_type_mismatch',
  594. 'position': f'第{col_idx + 1}列',
  595. 'file1_value': f'{headers1[col_idx]} ({column_types1[col_idx]})',
  596. 'file2_value': f'{headers2[col_idx]} ({column_types2[col_idx]})',
  597. 'description': f'列类型不一致: {column_types1[col_idx]} vs {column_types2[col_idx]}',
  598. 'severity': 'high',
  599. 'column_index': col_idx
  600. })
  601. total_columns = min(len(column_types1), len(column_types2))
  602. mismatch_ratio = len(mismatched_columns) / total_columns if total_columns > 0 else 0
  603. if mismatch_ratio > 0.5:
  604. differences.append({
  605. 'type': 'table_header_critical',
  606. 'position': '表格列类型',
  607. 'file1_value': f'{len(mismatched_columns)}列类型不一致',
  608. 'file2_value': f'共{total_columns}列',
  609. 'description': f'列类型差异过大: {len(mismatched_columns)}/{total_columns}列不匹配 ({mismatch_ratio:.1%})',
  610. 'severity': 'critical'
  611. })
  612. return mismatched_columns
  613. def _merge_column_types(self, column_types1: List[str], column_types2: List[str],
  614. mismatched_columns: List[int]) -> List[str]:
  615. """合并列类型"""
  616. column_types = []
  617. for col_idx in range(max(len(column_types1), len(column_types2))):
  618. if col_idx >= len(column_types1):
  619. column_types.append(column_types2[col_idx])
  620. elif col_idx >= len(column_types2):
  621. column_types.append(column_types1[col_idx])
  622. elif col_idx in mismatched_columns:
  623. type1 = column_types1[col_idx]
  624. type2 = column_types2[col_idx]
  625. if type1 == 'text' or type2 == 'text':
  626. column_types.append('text')
  627. elif type1 == 'text_number' or type2 == 'text_number':
  628. column_types.append('text_number')
  629. else:
  630. column_types.append(type1)
  631. else:
  632. column_types.append(column_types1[col_idx])
  633. return column_types
    def _compare_data_rows(self, table1: List[List[str]], table2: List[List[str]],
                           header_row_idx1: int, header_row_idx2: int,
                           headers1: List[str], column_types: List[str],
                           mismatched_columns: List[int], header_match: bool) -> List[Dict]:
        """
        Compare the data rows (everything below each header) positionally.

        Missing rows become 'table_row_missing' records; cell differences
        come from compare_cell_value, with severity raised to 'high' when
        the column's type conflicted between the two files.

        NOTE(review): *header_match* is accepted but never read here.
        NOTE(review): actual_row_num derives from header_row_idx1 only, so
        reported row numbers assume both headers sit on the same row — the
        caller separately flags differing header positions.
        """
        differences = []
        data_rows1 = table1[header_row_idx1 + 1:]
        data_rows2 = table2[header_row_idx2 + 1:]
        max_rows = max(len(data_rows1), len(data_rows2))
        for row_idx in range(max_rows):
            row1 = data_rows1[row_idx] if row_idx < len(data_rows1) else []
            row2 = data_rows2[row_idx] if row_idx < len(data_rows2) else []
            # 1-based row number in file 1's coordinates (+1 header, +1 base).
            actual_row_num = header_row_idx1 + row_idx + 2
            if not row1:
                differences.append({
                    'type': 'table_row_missing',
                    'position': f'第{actual_row_num}行',
                    'file1_value': '',
                    'file2_value': ', '.join(row2),
                    'description': f'文件1缺少第{actual_row_num}行',
                    'severity': 'high',
                    'row_index': actual_row_num
                })
                continue
            if not row2:
                differences.append({
                    'type': 'table_row_missing',
                    'position': f'第{actual_row_num}行',
                    'file1_value': ', '.join(row1),
                    'file2_value': '',
                    'description': f'文件2缺少第{actual_row_num}行',
                    'severity': 'high',
                    'row_index': actual_row_num
                })
                continue
            # Column-by-column comparison.
            max_cols = max(len(row1), len(row2))
            for col_idx in range(max_cols):
                cell1 = row1[col_idx] if col_idx < len(row1) else ''
                cell2 = row2[col_idx] if col_idx < len(row2) else ''
                # Skip cells flagged as image content.
                if "[图片内容-忽略]" in cell1 or "[图片内容-忽略]" in cell2:
                    continue
                # Columns beyond the merged type list default to free text.
                column_type = column_types[col_idx] if col_idx < len(column_types) else 'text'
                column_name = headers1[col_idx] if col_idx < len(headers1) else f'列{col_idx + 1}'
                compare_result = self.compare_cell_value(cell1, cell2, column_type, column_name)
                if not compare_result['match']:
                    diff_info = compare_result['difference']
                    type_mismatch_note = ""
                    if col_idx in mismatched_columns:
                        type_mismatch_note = " [列类型冲突]"
                    # Base severity comes from the cell comparison itself;
                    # a column-type conflict overrides it to 'high'.
                    base_severity = diff_info.get('severity', 'medium')
                    final_severity = 'high' if col_idx in mismatched_columns else base_severity
                    differences.append({
                        'type': diff_info['type'],
                        'position': f'第{actual_row_num}行第{col_idx + 1}列',
                        'file1_value': diff_info['value1'],
                        'file2_value': diff_info['value2'],
                        'description': diff_info['description'] + type_mismatch_note,
                        'severity': final_severity,
                        'row_index': actual_row_num,
                        'col_index': col_idx,
                        'column_name': column_name,
                        'column_type': column_type,
                        'column_type_mismatch': col_idx in mismatched_columns,
                    })
        return differences