|
|
@@ -0,0 +1,202 @@
|
|
|
import datetime
import difflib
import json
import re
from typing import Dict, List

from compare_ocr_results import OCRResultComparator
|
|
|
+
|
|
|
class OCRResultComparatorUnifiedDiff(OCRResultComparator):
    """Compare two lists of OCR paragraphs using a git-style unified diff.

    Inherits from OCRResultComparator to reuse its shared functionality;
    this subclass adds difflib-based diffing plus JSON/Markdown report
    generation built from the structured diff data.
    """

    # Matches a hunk header such as "@@ -1,5 +1,4 @@".  Groups 1/3 are the
    # 1-based start lines for file1/file2; groups 2/4 are the optional line
    # counts (unified-diff semantics default them to 1 when omitted).
    # Compiled once at class-definition time instead of inside the parse loop.
    _HUNK_HEADER_RE = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')

    def __init__(self):
        super().__init__()

    def generate_unified_diff(self, paras1: List[str], paras2: List[str],
                              file1_path: str, file2_path: str) -> Dict:
        """Generate a git-diff-like unified diff and return structured data.

        Args:
            paras1: Paragraphs extracted from the first file.
            paras2: Paragraphs extracted from the second file.
            file1_path: Path label used as the "from" header of the diff.
            file2_path: Path label used as the "to" header of the diff.

        Returns:
            Structured diff dictionary (see ``_parse_unified_diff``).
        """
        file1_lines = paras1
        file2_lines = paras2

        # lineterm='' prevents difflib from appending extra newlines, since
        # the paragraph strings carry no trailing line terminator themselves.
        diff_output = list(difflib.unified_diff(
            file1_lines,
            file2_lines,
            fromfile=file1_path,
            tofile=file2_path,
            lineterm='',
        ))

        # Parse the raw diff lines into the structured report format.
        return self._parse_unified_diff(
            diff_output, file1_lines, file2_lines, file1_path, file2_path)

    def _parse_unified_diff(self, diff_lines: List[str], file1_lines: List[str],
                            file2_lines: List[str], file1_path: str,
                            file2_path: str) -> Dict:
        """Parse unified-diff output lines into a structured difference dict.

        Args:
            diff_lines: Output of ``difflib.unified_diff`` as a list.
            file1_lines: Paragraphs of file 1 (used only for the paragraph count).
            file2_lines: Paragraphs of file 2 (used only for the paragraph count).
            file1_path: Path label echoed back in the result.
            file2_path: Path label echoed back in the result.

        Returns:
            Dict with ``differences`` (one entry per added/deleted line),
            ``statistics`` and per-file metadata.
        """
        differences: List[Dict] = []
        # 0-based cursors tracking the current position in each file.
        file1_line_num = 0
        file2_line_num = 0

        for line in diff_lines:
            if line.startswith('---') or line.startswith('+++'):
                # File headers carry no positional information.
                continue
            elif line.startswith('@@'):
                match = self._HUNK_HEADER_RE.match(line)
                if match:
                    # Reset both cursors to the hunk start (1-based -> 0-based).
                    # The hunk line counts (groups 2/4) are not needed here.
                    file1_line_num = int(match.group(1)) - 1
                    file2_line_num = int(match.group(3)) - 1
            elif line.startswith(' '):
                # Unchanged context line: advance both cursors in lockstep.
                file1_line_num += 1
                file2_line_num += 1
            elif line.startswith('-'):
                # Line deleted from file 1; strip the '-' prefix.
                content = line[1:]
                differences.append({
                    'type': 'paragraph',
                    'position': f'段落{file1_line_num + 1}',
                    'file1_value': content,
                    'file2_value': "",
                    'description': '文件1中独有的段落',
                    'similarity': 0.0,
                    'severity': 'medium',
                    'line_number': file1_line_num + 1,
                    'change_type': 'deletion'
                })
                file1_line_num += 1
            elif line.startswith('+'):
                # Line added in file 2; strip the '+' prefix.
                content = line[1:]
                differences.append({
                    'type': 'paragraph',
                    'position': f'段落{file2_line_num + 1}',
                    'file1_value': "",
                    'file2_value': content,
                    'description': '文件2中独有的段落',
                    'similarity': 0.0,
                    'severity': 'medium',
                    'line_number': file2_line_num + 1,
                    'change_type': 'addition'
                })
                file2_line_num += 1

        # Aggregate counts for the report header.
        stats = {
            'total_differences': len(differences),
            'table_differences': 0,  # a text diff never contains table diffs
            'paragraph_differences': len(differences),
            'amount_differences': 0,
            'high_severity': len([d for d in differences if d.get('severity') == 'high']),
            'medium_severity': len([d for d in differences if d.get('severity') == 'medium']),
            'low_severity': len([d for d in differences if d.get('severity') == 'low']),
            'deletions': len([d for d in differences if d.get('change_type') == 'deletion']),
            'additions': len([d for d in differences if d.get('change_type') == 'addition'])
        }

        return {
            'differences': differences,
            'statistics': stats,
            'file1_tables': 0,
            'file2_tables': 0,
            'file1_paragraphs': len(file1_lines),
            'file2_paragraphs': len(file2_lines),
            'file1_path': file1_path,
            'file2_path': file2_path,
            'diff_type': 'unified_diff'
        }

    def generate_unified_diff_report(self, paras1: List[str], paras2: List[str],
                                     file1_path: str, file2_path: str,
                                     output_file: str):
        """Generate JSON and Markdown reports from the unified diff.

        Writes ``{output_file}_unified_diff.json`` and
        ``{output_file}_unified_diff.md`` and returns the structured data.
        """
        # Build the structured diff data.
        diff_data = self.generate_unified_diff(paras1, paras2, file1_path, file2_path)

        # Stamp the report with the generation time.
        diff_data['timestamp'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # JSON report (ensure_ascii=False keeps CJK text readable).
        json_file = f"{output_file}_unified_diff.json"
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(diff_data, f, ensure_ascii=False, indent=2)

        # Markdown report.
        md_file = f"{output_file}_unified_diff.md"
        self._generate_unified_diff_markdown(diff_data, md_file)

        print(f"📄 Unified Diff JSON报告: {json_file}")
        print(f"📝 Unified Diff Markdown报告: {md_file}")

        return diff_data

    def _generate_unified_diff_markdown(self, diff_data: Dict, output_file: str):
        """Render the structured diff data as a Markdown report file."""
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("# OCR结果Unified Diff对比报告\n\n")

            # Basic file information.
            f.write("## 基本信息\n\n")
            f.write(f"- **文件1**: `{diff_data['file1_path']}`\n")
            f.write(f"- **文件2**: `{diff_data['file2_path']}`\n")
            f.write(f"- **比较时间**: {diff_data.get('timestamp', 'N/A')}\n")
            f.write(f"- **对比方式**: Unified Diff\n\n")

            # Summary statistics.
            stats = diff_data['statistics']
            f.write("## 统计信息\n\n")
            f.write(f"- 总差异数量: **{stats['total_differences']}**\n")
            f.write(f"- 删除行数: **{stats['deletions']}**\n")
            f.write(f"- 添加行数: **{stats['additions']}**\n")
            f.write(f"- 文件1段落数: {diff_data['file1_paragraphs']}\n")
            f.write(f"- 文件2段落数: {diff_data['file2_paragraphs']}\n\n")

            # Difference details, if any.
            if diff_data['differences']:
                f.write("## 差异详情\n\n")

                # Group entries by change type for the two detail sections.
                deletions = [d for d in diff_data['differences'] if d['change_type'] == 'deletion']
                additions = [d for d in diff_data['differences'] if d['change_type'] == 'addition']

                if deletions:
                    f.write(f"### 🗑️ 删除内容 ({len(deletions)}项)\n\n")
                    for i, diff in enumerate(deletions, 1):
                        f.write(f"**{i}. 第{diff['line_number']}行**\n")
                        f.write(f"```\n{diff['file1_value']}\n```\n\n")

                if additions:
                    f.write(f"### ➕ 新增内容 ({len(additions)}项)\n\n")
                    for i, diff in enumerate(additions, 1):
                        f.write(f"**{i}. 第{diff['line_number']}行**\n")
                        f.write(f"```\n{diff['file2_value']}\n```\n\n")

                # Full difference list as a table; content truncated to 50 chars.
                f.write("## 详细差异列表\n\n")
                f.write("| 序号 | 类型 | 行号 | 变更类型 | 内容 | 描述 |\n")
                f.write("| --- | --- | --- | --- | --- | --- |\n")

                for i, diff in enumerate(diff_data['differences'], 1):
                    change_icon = "🗑️" if diff['change_type'] == 'deletion' else "➕"
                    content = diff['file1_value'] if diff['change_type'] == 'deletion' else diff['file2_value']
                    f.write(f"| {i} | {change_icon} | {diff['line_number']} | {diff['change_type']} | ")
                    f.write(f"`{content[:50]}{'...' if len(content) > 50 else ''}` | ")
                    f.write(f"{diff['description']} |\n")
            else:
                f.write("## 结论\n\n")
                f.write("🎉 **完美匹配!没有发现任何差异。**\n\n")