# compare_use_unified_diff.py
import datetime
import difflib
import json
import re
from typing import Dict, List

from compare_ocr_results import OCRResultComparator
  5. class OCRResultComparatorUnifiedDiff(OCRResultComparator):
  6. # 继承自OCRResultComparator以复用部分功能
  7. def __init__(self):
  8. super().__init__()
  9. def generate_unified_diff(self, paras1: List[str], paras2: List[str], file1_path: str, file2_path: str) -> Dict:
  10. """
  11. 生成类似git diff的统一差异格式,并返回结构化数据
  12. """
  13. # 直接调用进行预处理
  14. file1_lines = paras1
  15. file2_lines = paras2
  16. # 使用unified_diff生成差异
  17. diff = difflib.unified_diff(
  18. file1_lines,
  19. file2_lines,
  20. fromfile=file1_path,
  21. tofile=file2_path,
  22. lineterm='' # 确保每行末尾不添加额外字符
  23. )
  24. # 将差异生成器转换为列表
  25. diff_output = list(diff)
  26. # 解析diff输出并生成结构化数据
  27. structured_diff = self._parse_unified_diff(diff_output, file1_lines, file2_lines, file1_path, file2_path)
  28. return structured_diff
  29. def _parse_unified_diff(self, diff_lines: List[str], file1_lines: List[str], file2_lines: List[str],
  30. file1_path: str, file2_path: str) -> Dict:
  31. """解析unified diff输出并生成结构化数据"""
  32. differences = []
  33. current_hunk = None
  34. file1_line_num = 0
  35. file2_line_num = 0
  36. for line in diff_lines:
  37. if line.startswith('---') or line.startswith('+++'):
  38. continue
  39. elif line.startswith('@@'):
  40. # 解析hunk头部,例如: @@ -1,5 +1,4 @@
  41. import re
  42. match = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@', line)
  43. if match:
  44. file1_start = int(match.group(1))
  45. file1_count = int(match.group(2)) if match.group(2) else 1
  46. file2_start = int(match.group(3))
  47. file2_count = int(match.group(4)) if match.group(4) else 1
  48. current_hunk = {
  49. 'file1_start': file1_start,
  50. 'file1_count': file1_count,
  51. 'file2_start': file2_start,
  52. 'file2_count': file2_count
  53. }
  54. file1_line_num = file1_start - 1 # 转为0基索引
  55. file2_line_num = file2_start - 1
  56. elif line.startswith(' '):
  57. # 未改变的行
  58. file1_line_num += 1
  59. file2_line_num += 1
  60. elif line.startswith('-'):
  61. # 文件1中删除的行
  62. content = line[1:] # 去掉'-'前缀
  63. differences.append({
  64. 'type': 'paragraph',
  65. 'position': f'段落{file1_line_num + 1}',
  66. 'file1_value': content,
  67. 'file2_value': "",
  68. 'description': '文件1中独有的段落',
  69. 'similarity': 0.0,
  70. 'severity': 'medium',
  71. 'line_number': file1_line_num + 1,
  72. 'change_type': 'deletion'
  73. })
  74. file1_line_num += 1
  75. elif line.startswith('+'):
  76. # 文件2中添加的行
  77. content = line[1:] # 去掉'+'前缀
  78. differences.append({
  79. 'type': 'paragraph',
  80. 'position': f'段落{file2_line_num + 1}',
  81. 'file1_value': "",
  82. 'file2_value': content,
  83. 'description': '文件2中独有的段落',
  84. 'similarity': 0.0,
  85. 'severity': 'medium',
  86. 'line_number': file2_line_num + 1,
  87. 'change_type': 'addition'
  88. })
  89. file2_line_num += 1
  90. # 计算统计信息
  91. stats = {
  92. 'total_differences': len(differences),
  93. 'table_differences': 0, # diff不包含表格差异
  94. 'paragraph_differences': len(differences),
  95. 'amount_differences': 0,
  96. 'high_severity': len([d for d in differences if d.get('severity') == 'high']),
  97. 'medium_severity': len([d for d in differences if d.get('severity') == 'medium']),
  98. 'low_severity': len([d for d in differences if d.get('severity') == 'low']),
  99. 'deletions': len([d for d in differences if d.get('change_type') == 'deletion']),
  100. 'additions': len([d for d in differences if d.get('change_type') == 'addition'])
  101. }
  102. return {
  103. 'differences': differences,
  104. 'statistics': stats,
  105. 'file1_tables': 0,
  106. 'file2_tables': 0,
  107. 'file1_paragraphs': len(file1_lines),
  108. 'file2_paragraphs': len(file2_lines),
  109. 'file1_path': file1_path,
  110. 'file2_path': file2_path,
  111. 'diff_type': 'unified_diff'
  112. }
  113. def generate_unified_diff_report(self, paras1: List[str], paras2: List[str], file1_path: str, file2_path: str, output_file: str):
  114. """生成unified diff的JSON和Markdown报告"""
  115. # 生成结构化diff数据
  116. diff_data = self.generate_unified_diff(paras1, paras2, file1_path, file2_path)
  117. # 添加时间戳
  118. import datetime
  119. diff_data['timestamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
  120. # 生成JSON报告
  121. json_file = f"{output_file}_unified_diff.json"
  122. with open(json_file, 'w', encoding='utf-8') as f:
  123. json.dump(diff_data, f, ensure_ascii=False, indent=2)
  124. # 生成Markdown报告
  125. md_file = f"{output_file}_unified_diff.md"
  126. self._generate_unified_diff_markdown(diff_data, md_file)
  127. print(f"📄 Unified Diff JSON报告: {json_file}")
  128. print(f"📝 Unified Diff Markdown报告: {md_file}")
  129. return diff_data
  130. def _generate_unified_diff_markdown(self, diff_data: Dict, output_file: str):
  131. """生成unified diff的Markdown报告"""
  132. with open(output_file, 'w', encoding='utf-8') as f:
  133. f.write("# OCR结果Unified Diff对比报告\n\n")
  134. # 基本信息
  135. f.write("## 基本信息\n\n")
  136. f.write(f"- **文件1**: `{diff_data['file1_path']}`\n")
  137. f.write(f"- **文件2**: `{diff_data['file2_path']}`\n")
  138. f.write(f"- **比较时间**: {diff_data.get('timestamp', 'N/A')}\n")
  139. f.write(f"- **对比方式**: Unified Diff\n\n")
  140. # 统计信息
  141. stats = diff_data['statistics']
  142. f.write("## 统计信息\n\n")
  143. f.write(f"- 总差异数量: **{stats['total_differences']}**\n")
  144. f.write(f"- 删除行数: **{stats['deletions']}**\n")
  145. f.write(f"- 添加行数: **{stats['additions']}**\n")
  146. f.write(f"- 文件1段落数: {diff_data['file1_paragraphs']}\n")
  147. f.write(f"- 文件2段落数: {diff_data['file2_paragraphs']}\n\n")
  148. # 差异详情
  149. if diff_data['differences']:
  150. f.write("## 差异详情\n\n")
  151. # 按变更类型分组
  152. deletions = [d for d in diff_data['differences'] if d['change_type'] == 'deletion']
  153. additions = [d for d in diff_data['differences'] if d['change_type'] == 'addition']
  154. if deletions:
  155. f.write(f"### 🗑️ 删除内容 ({len(deletions)}项)\n\n")
  156. for i, diff in enumerate(deletions, 1):
  157. f.write(f"**{i}. 第{diff['line_number']}行**\n")
  158. f.write(f"```\n{diff['file1_value']}\n```\n\n")
  159. if additions:
  160. f.write(f"### ➕ 新增内容 ({len(additions)}项)\n\n")
  161. for i, diff in enumerate(additions, 1):
  162. f.write(f"**{i}. 第{diff['line_number']}行**\n")
  163. f.write(f"```\n{diff['file2_value']}\n```\n\n")
  164. # 详细差异表格
  165. f.write("## 详细差异列表\n\n")
  166. f.write("| 序号 | 类型 | 行号 | 变更类型 | 内容 | 描述 |\n")
  167. f.write("| --- | --- | --- | --- | --- | --- |\n")
  168. for i, diff in enumerate(diff_data['differences'], 1):
  169. change_icon = "🗑️" if diff['change_type'] == 'deletion' else "➕"
  170. content = diff['file1_value'] if diff['change_type'] == 'deletion' else diff['file2_value']
  171. f.write(f"| {i} | {change_icon} | {diff['line_number']} | {diff['change_type']} | ")
  172. f.write(f"`{content[:50]}{'...' if len(content) > 50 else ''}` | ")
  173. f.write(f"{diff['description']} |\n")
  174. else:
  175. f.write("## 结论\n\n")
  176. f.write("🎉 **完美匹配!没有发现任何差异。**\n\n")