quyuan 1 year ago
parent
commit
d2e250ce38

+ 0 - 31
tools/README.MD

@@ -1,31 +0,0 @@
-# Tool Script Usage Instructions
-
-### OCR Badcase Commands
-
-- **Command without badcase output:**
-
-  `python ocr_badcase.py pdf_json_label_0306.json ocr_dataset.json json_files.zip ocr_overall base_data_ocr.json`
-
-- **Command with badcase output:**
-  
-  `python ocr_badcase.py pdf_json_label_0306.json ocr_dataset.json json_files.zip ocr_overall base_data_ocr.json --badcase_path ocr_badcase`
-
-### Text Badcase Commands
-
-- **Command without badcase output:**
-
-    `python text_badcase.py pdf_json_label_0306.json pdf_json_label_0229.json json_files.zip text_overall base_data_text.json`
-
-- **Command with badcase output:**
-
-    `python text_badcase.py pdf_json_label_0306.json pdf_json_label_0229.json json_files.zip text_overall base_data_text.json --badcase_path text_badcase`
-
-- **Command with upload to S3:**
-
-  - add the following arguments to the command:
-
-        `--s3_bucket_name llm-process-pperf --s3_file_directory qa-validate/pdf-datasets/badcase --AWS_ACCESS_KEY <your AK> --AWS_SECRET_KEY <your SK> --END_POINT_URL <your endpoint>`
-

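For reference, a fully assembled command with both badcase output and S3 upload, combining the pieces documented above (the AK/SK/endpoint values are placeholders):

`python text_badcase.py pdf_json_label_0306.json pdf_json_label_0229.json json_files.zip text_overall base_data_text.json --badcase_path text_badcase --s3_bucket_name llm-process-pperf --s3_file_directory qa-validate/pdf-datasets/badcase --AWS_ACCESS_KEY <your AK> --AWS_SECRET_KEY <your SK> --END_POINT_URL <your endpoint>`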
+ 0 - 87
tools/base_data_ocr.json

@@ -1,87 +0,0 @@
-{
-    "accuracy": 1.0,
-    "precision": 1.0,
-    "recall": 1.0,
-    "f1_score": 1.0,
-    "pdf间的平均编辑距离": 133.10256410256412,
-    "pdf间的平均bleu": 0.28838311595434046,
-    "分段准确率": 0.07220216606498195,
-    "行内公式准确率": {
-        "accuracy": 0.004835727492533068,
-        "precision": 0.008790072388831437,
-        "recall": 0.010634970284641852,
-        "f1_score": 0.009624911535739562
-    },
-    "行内公式编辑距离": 1.6176470588235294,
-    "行内公式bleu": 0.17154724654721457,
-    "行间公式准确率": {
-        "accuracy": 0.08490566037735849,
-        "precision": 0.1836734693877551,
-        "recall": 0.13636363636363635,
-        "f1_score": 0.1565217391304348
-    },
-    "行间公式编辑距离": 113.22222222222223,
-    "行间公式bleu": 0.2531053359913409,
-    "丢弃文本准确率": {
-        "accuracy": 0.00035398230088495576,
-        "precision": 0.0006389776357827476,
-        "recall": 0.0007930214115781126,
-        "f1_score": 0.0007077140835102619
-    },
-    "丢弃文本标签准确率": {
-        "color_background_header_txt_block": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 41.0
-        },
-        "header": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 4.0
-        },
-        "footnote": {
-            "precision": 1.0,
-            "recall": 0.009708737864077669,
-            "f1-score": 0.019230769230769232,
-            "support": 103.0
-        },
-        "on-table": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 665.0
-        },
-        "rotate": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 63.0
-        },
-        "on-image": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 380.0
-        },
-        "micro avg": {
-            "precision": 1.0,
-            "recall": 0.0007961783439490446,
-            "f1-score": 0.0015910898965791568,
-            "support": 1256.0
-        }
-    },
-    "丢弃图片准确率": {
-        "accuracy": 0.0,
-        "precision": 0.0,
-        "recall": 0.0,
-        "f1_score": 0.0
-    },
-    "丢弃表格准确率": {
-        "accuracy": 0.0,
-        "precision": 0.0,
-        "recall": 0.0,
-        "f1_score": 0.0
-    }
-}

+ 0 - 88
tools/base_data_text.json

@@ -1,88 +0,0 @@
-{
-    "accuracy": 1.0,
-    "precision": 1.0,
-    "recall": 1.0,
-    "f1_score": 1.0,
-    "pdf间的平均编辑距离": 19.82051282051282,
-    "pdf间的平均bleu": 0.9002485609584511,
-    "阅读顺序编辑距离": 0.3176895306859206,
-    "分段准确率": 0.8989169675090253,
-    "行内公式准确率": {
-        "accuracy": 0.9782741738066095,
-        "precision": 0.9782741738066095,
-        "recall": 1.0,
-        "f1_score": 0.9890177880897139
-    },
-    "行内公式编辑距离": 0.0,
-    "行内公式bleu": 0.20340450120213166,
-    "行间公式准确率": {
-        "accuracy": 1.0,
-        "precision": 1.0,
-        "recall": 1.0,
-        "f1_score": 1.0
-    },
-    "行间公式编辑距离": 0.0,
-    "行间公式bleu": 0.3662262622386575,
-    "丢弃文本准确率": {
-        "accuracy": 0.867870036101083,
-        "precision": 0.9064856711915535,
-        "recall": 0.9532117367168914,
-        "f1_score": 0.9292616930807885
-    },
-    "丢弃文本标签准确率": {
-        "color_background_header_txt_block": {
-            "precision": 0.0,
-            "recall": 0.0,
-            "f1-score": 0.0,
-            "support": 41.0
-        },
-        "rotate": {
-            "precision": 1.0,
-            "recall": 0.9682539682539683,
-            "f1-score": 0.9838709677419355,
-            "support": 63.0
-        },
-        "footnote": {
-            "precision": 1.0,
-            "recall": 0.883495145631068,
-            "f1-score": 0.9381443298969072,
-            "support": 103.0
-        },
-        "header": {
-            "precision": 1.0,
-            "recall": 1.0,
-            "f1-score": 1.0,
-            "support": 4.0
-        },
-        "on-image": {
-            "precision": 0.9947643979057592,
-            "recall": 1.0,
-            "f1-score": 0.9973753280839895,
-            "support": 380.0
-        },
-        "on-table": {
-            "precision": 1.0,
-            "recall": 0.9443609022556391,
-            "f1-score": 0.97138437741686,
-            "support": 665.0
-        },
-        "micro avg": {
-            "precision": 0.9982847341337907,
-            "recall": 0.9267515923566879,
-            "f1-score": 0.9611890999174236,
-            "support": 1256.0
-        }
-    },
-    "丢弃图片准确率": {
-        "accuracy": 0.8666666666666667,
-        "precision": 0.9285714285714286,
-        "recall": 0.9285714285714286,
-        "f1_score": 0.9285714285714286
-    },
-    "丢弃表格准确率": {
-        "accuracy": 0,
-        "precision": 0,
-        "recall": 0,
-        "f1_score": 0
-    }
-}

+ 41 - 0
tools/benchmark.py

@@ -0,0 +1,41 @@
+import zipfile
+import os
+import shutil
+
+code_path = os.environ.get('GITHUB_WORKSPACE')
+pdf_dev_path = "/home/quyuan/data"
+pdf_res_path = "/home/quyuan/code/Magic-PDF/Magic-PDF/Magic-PDF/ci/magic-pdf"
+
+
+def test_cli():
+    # Run magic_pdf over every PDF under pdf_dev_path
+    cmd = 'cd %s && export PYTHONPATH=. && find %s -type f -name "*.pdf" | xargs -I{} python magic_pdf/cli/magicpdf.py pdf-command --pdf {}' % (code_path, pdf_dev_path)
+    os.system(cmd)
+    # Collect the generated markdown files, grouped by the document-type prefix
+    for annotation_name in os.listdir(os.path.join(pdf_dev_path, "output")):
+        if not annotation_name.endswith('.pdf'):
+            continue
+        md_path = os.path.join(pdf_res_path, annotation_name, "auto", annotation_name + ".md")
+        if os.path.exists(md_path):
+            prefix = annotation_name.split('_')[-2]
+            target_dir = os.path.join(pdf_dev_path, prefix)
+            os.makedirs(target_dir, exist_ok=True)
+            shutil.copy(md_path, os.path.join(target_dir, annotation_name + ".md"))
+
+
+def calculate_score():
+    cmd = "cd %s && export PYTHONPATH=. && python tools/clean_photo.py --tool_name annotations --download_dir %s" % (code_path, pdf_dev_path)
+    os.system(cmd)
+    cmd = "cd %s && export PYTHONPATH=. && python tools/clean_photo.py --tool_name magicpdf --download_dir %s" % (code_path, pdf_dev_path)
+    os.system(cmd)
+    cmd = "cd %s && export PYTHONPATH=. && python tools/markdown_calculate.py --tool_name pdf-command --download_dir %s --results %s" % (code_path, pdf_dev_path, os.path.join(pdf_dev_path, "result.json"))
+    os.system(cmd)
+
+
+def extract_zip(zip_file_path, extract_to_path):
+    if zipfile.is_zipfile(zip_file_path):
+        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
+            zip_ref.extractall(extract_to_path)
+        print(f'Files extracted to {extract_to_path}')
+    else:
+        print(f'{zip_file_path} is not a zip file')
+
+
+if __name__ == "__main__":
+    extract_zip(os.path.join(pdf_dev_path, 'output.zip'), os.path.join(pdf_dev_path, 'datasets'))
+    test_cli()
+    calculate_score()

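benchmark.py is wired for CI: it reads the repo checkout path from the GITHUB_WORKSPACE environment variable, while pdf_dev_path and pdf_res_path are hard-coded. Assuming those paths, a local run would look roughly like this, with output.zip already placed under pdf_dev_path:

`GITHUB_WORKSPACE=/path/to/Magic-PDF python tools/benchmark.py`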
+ 112 - 0
tools/clean_photo.py

@@ -0,0 +1,112 @@
+import pypandoc
+import re  
+import htmltabletomd
+import os  
+import argparse
+import zipfile
+
+parser = argparse.ArgumentParser(description="get tool type")
+parser.add_argument(
+    "--tool_name",
+    type=str,
+    required=True,
+    help="input tool name",
+)
+parser.add_argument(
+    "--download_dir",
+    type=str,
+    required=True,
+    help="input download dir",
+)
+args = parser.parse_args()
+
+def clean_markdown_images(content):  
+    pattern = re.compile(r'!\[[^\]]*\]\([^)]*\)', re.IGNORECASE)  
+    cleaned_content = pattern.sub('', content)   
+    return cleaned_content
+   
+def clean_ocrmath_photo(content):
+    pattern = re.compile(r'\\includegraphics\[.*?\]\{.*?\}', re.IGNORECASE)  
+    cleaned_content = pattern.sub('', content)   
+    return cleaned_content
+
+def convert_html_table_to_md(html_table):  
+    lines = html_table.strip().split('\n')  
+    md_table = ''  
+    if lines and '<tr>' in lines[0]:  
+        in_thead = True  
+        for line in lines:  
+            if '<th>' in line:  
+                cells = re.findall(r'<th>(.*?)</th>', line)  
+                md_table += '| ' + ' | '.join(cells) + ' |\n'  
+                in_thead = False  
+            elif '<td>' in line and not in_thead:  
+                cells = re.findall(r'<td>(.*?)</td>', line)  
+                md_table += '| ' + ' | '.join(cells) + ' |\n'  
+        md_table = md_table.rstrip() + '\n'    
+    return md_table  
+ 
+def convert_latex_to_md(content):
+    tables = re.findall(r'\\begin\{tabular\}(.*?)\\end\{tabular\}', content, re.DOTALL)
+    placeholders = []
+    for table in tables:
+        placeholder = f"<!-- TABLE_PLACEHOLDER_{len(placeholders)} -->"
+        replace_str = f"\\begin{{tabular}}{table}\\end{{tabular}}"
+        content = content.replace(replace_str, placeholder)
+        try:
+            pypandoc.convert_text(replace_str, format="latex", to="md", outputfile="output.md", encoding="utf-8")
+        except Exception:
+            markdown_string = replace_str
+        else:
+            markdown_string = open('output.md', 'r', encoding='utf-8').read()
+        placeholders.append((placeholder, markdown_string))
+    new_content = content
+    # Substitute the converted tables back into the content
+    for placeholder, md_table in placeholders:
+        new_content = new_content.replace(placeholder, md_table)
+    return new_content
+
+ 
+def convert_html_tables_to_md(content):
+    tables = re.findall(r'<table>(.*?)</table>', content, re.DOTALL)
+    placeholders = []
+    for table in tables:
+        placeholder = f"<!-- TABLE_PLACEHOLDER_{len(placeholders)} -->"
+        content = content.replace(f"<table>{table}</table>", placeholder)
+        try:
+            convert_table = htmltabletomd.convert_table(table)
+        except Exception:
+            convert_table = table
+        placeholders.append((placeholder, convert_table))
+    new_content = content
+    # Substitute the converted tables back into the content
+    for placeholder, md_table in placeholders:
+        new_content = new_content.replace(placeholder, md_table)
+    return new_content
+
+def clean_data(prod_type, download_dir):
+    file_type = ["academic_literature", "atlas", "courseware", "colorful_textbook", "historical_documents", "notes", "ordinary_books", "ordinary_exam_paper", "ordinary_textbook", "research_report", "special_exam_paper"]
+    for filetype in file_type:
+        tgt_dir = os.path.join(download_dir, filetype, prod_type, "cleaned")
+        if not os.path.exists(tgt_dir):  
+            os.makedirs(tgt_dir) 
+        source_dir = os.path.join(download_dir, filetype, prod_type)
+        filenames = os.listdir(source_dir)
+        for filename in filenames:
+            if filename.endswith('.md'):
+                input_file = os.path.join(source_dir, filename)
+                output_file = os.path.join(tgt_dir, "cleaned_" + filename)
+                with open(input_file, 'r', encoding='utf-8') as fr:
+                    content = fr.read()
+                    new_content = convert_html_tables_to_md(content)
+                    new_content = clean_markdown_images(new_content)
+                    new_content = clean_ocrmath_photo(new_content)
+                    new_content = convert_latex_to_md(new_content)
+                    with open(output_file, 'w', encoding='utf-8') as fw:
+                        fw.write(new_content)
+
+
+if __name__ == '__main__':
+    tool_type = args.tool_name
+    download_dir = args.download_dir
+    clean_data(tool_type, download_dir)

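clean_photo.py normalizes the markdown before scoring: for each document type it reads download_dir/&lt;type&gt;/&lt;tool_name&gt;/*.md, strips image references, converts HTML and LaTeX tables to markdown tables, and writes cleaned_&lt;name&gt;.md into a cleaned/ subdirectory. A typical invocation, mirroring the calls in calculate_score() above:

`python tools/clean_photo.py --tool_name annotations --download_dir /home/quyuan/data`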
+ 0 - 51
tools/config_init_to_json.py

@@ -1,51 +0,0 @@
-from loguru import logger
-import json
-import os
-from magic_pdf.config import s3_buckets, s3_clusters, s3_users
-
-
-def get_bucket_configs_dict(buckets, clusters, users):
-    bucket_configs = {}
-    for s3_bucket in buckets.items():
-        bucket_name = s3_bucket[0]
-        bucket_config = s3_bucket[1]
-        cluster, user = bucket_config
-        cluster_config = clusters[cluster]
-        endpoint_key = "outside"
-        endpoints = cluster_config[endpoint_key]
-        endpoint = endpoints[0]
-        user_config = users[user]
-        # logger.info(bucket_name)
-        # logger.info(endpoint)
-        # logger.info(user_config)
-        bucket_config = [user_config["ak"], user_config["sk"], endpoint]
-        bucket_configs[bucket_name] = bucket_config
-
-    return bucket_configs
-
-
-def write_json_to_home(my_dict):
-    # Convert dictionary to JSON
-    json_data = json.dumps(my_dict, indent=4, ensure_ascii=False)
-
-    home_dir = os.path.expanduser("~")
-
-    # Define the output file path
-    output_file = os.path.join(home_dir, "magic-pdf.json")
-
-    # Write JSON data to the output file
-    with open(output_file, "w") as f:
-        f.write(json_data)
-
-    # Print a success message
-    print(f"Dictionary converted to JSON and saved to {output_file}")
-
-
-if __name__ == '__main__':
-    bucket_configs_dict = get_bucket_configs_dict(s3_buckets, s3_clusters, s3_users)
-    logger.info(bucket_configs_dict)
-    config_dict = {
-        "bucket_info": bucket_configs_dict,
-        "temp-output-dir": "/tmp"
-    }
-    write_json_to_home(config_dict)

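config_init_to_json.py (removed here) materialized the in-repo s3_buckets/s3_clusters/s3_users configuration into ~/magic-pdf.json. Judging from the code, the generated file has roughly this shape; the bucket name and credential values below are placeholders:

```python
# Sketch of the generated ~/magic-pdf.json content (placeholder values)
{
    "bucket_info": {
        "some-bucket": ["<ak>", "<sk>", "http://outside-endpoint.example"]
    },
    "temp-output-dir": "/tmp"
}
```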
BIN
tools/json_files.zip


Diff suppressed because the file is too large
+ 0 - 0
tools/json_files/ocr_dataset.json


Diff suppressed because the file is too large
+ 0 - 0
tools/json_files/pdf_json_label_0229.json


Diff suppressed because the file is too large
+ 0 - 0
tools/json_files/pdf_json_label_0306.json


+ 143 - 0
tools/markdown_calculate.py

@@ -0,0 +1,143 @@
+import os  
+from Levenshtein import distance  
+from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction, corpus_bleu
+from nltk.tokenize import word_tokenize  
+import json 
+import re
+import scoring
+import argparse
+
+parser = argparse.ArgumentParser(description="get directory")
+parser.add_argument('--document_types', 
+    nargs='+',
+    choices=["academic_literature", "atlas", "courseware", "colorful_textbook", "historical_documents", "notes", "ordinary_books", "ordinary_exam_paper", "ordinary_textbook", "research_report", "special_exam_paper"], 
+    help='Choose one or more document_types',
+    default=["academic_literature", "atlas", "courseware", "colorful_textbook", "historical_documents", "notes", "ordinary_books", "ordinary_exam_paper", "ordinary_textbook", "research_report", "special_exam_paper"]
+)
+
+parser.add_argument(
+    "--tool_name",
+    type=str,
+    required=True,
+    help="tool name",
+)
+parser.add_argument(
+    "--download_dir",
+    type=str,
+    required=True,
+    help="input download dir",
+)
+parser.add_argument(
+    "--results",
+    type=str,
+    required=True,
+    help="results path(end with .json)",
+)
+args = parser.parse_args()
+fw = open(args.results, 'w+', encoding='utf-8')
+# Lists to store the edit distances and BLEU scores live on the Scoring class
+class Scoring:
+    def __init__(self):
+        self.edit_distances = []
+        self.bleu_scores = []
+        self.sim_scores = []
+        self.filenames = []
+        self.score_dict = {}
+        self.annotation_cnt = 0
+
+    def simple_bleu_score(self, candidate, reference):  
+        candidate_tokens = word_tokenize(candidate)  
+        reference_tokens = word_tokenize(reference) 
+        return sentence_bleu([reference_tokens], candidate_tokens, smoothing_function=SmoothingFunction().method1) 
+
+
+    def preprocess_string(self, s):  
+        sub_enter = re.sub(r'\n+', '\n', s)
+        return re.sub(r'  ', ' ', sub_enter)
+    
+    def calculate_similarity(self, annotation, actual, tool_type):
+        class_dict = {}
+        edit_distances = []
+        bleu_scores = []
+        sim_scores = []
+        total_file = 0
+        for filename in os.listdir(annotation):
+            if filename.endswith('.md') and not filename.startswith('.'):  # skip hidden files
+                total_file = total_file + 1
+                # Read the file from the annotation directory
+                with open(os.path.join(annotation, filename), 'r', encoding='utf-8') as file_a:
+                    content_a = file_a.read()
+                self.annotation_cnt = self.annotation_cnt + 1
+                filepath_b = os.path.join(actual, filename)
+                if os.path.exists(filepath_b):
+                    with open(filepath_b, 'r', encoding='utf-8') as file_b:
+                        content_b = file_b.read()
+                        self.filenames.append(filename)
+                        # Compute the normalized edit distance
+                        edit_dist = distance(self.preprocess_string(content_b), self.preprocess_string(content_a)) / max(len(content_a), len(content_b))
+                        self.edit_distances.append(edit_dist)
+                        edit_distances.append(edit_dist)
+                        # Compute the BLEU score
+                        bleu_score = self.simple_bleu_score(content_b, content_a)
+                        bleu_scores.append(bleu_score)
+                        self.bleu_scores.append(bleu_score)
+                        # Compute the marker score
+                        score = scoring.score_text(content_b, content_a)
+                        sim_scores.append(score)
+                        self.sim_scores.append(score)
+                        class_dict[filename] = {"edit_dist": edit_dist, "bleu_score": bleu_score, "sim_score": score}
+                        self.score_dict[filename] = {"edit_dist": edit_dist, "bleu_score": bleu_score, "sim_score": score}
+                else:
+                    print(f"File {filename} not found in actual directory.")
+        # Compute the per-class averages
+        class_average_edit_distance = sum(edit_distances) / len(edit_distances) if edit_distances else 0
+        class_average_bleu_score = sum(bleu_scores) / len(bleu_scores) if bleu_scores else 0
+        class_average_sim_score = sum(sim_scores) / len(sim_scores) if sim_scores else 0
+        fw.write(json.dumps(class_dict, ensure_ascii=False) + "\n")
+        ratio = len(class_dict) / total_file
+        fw.write(f"{tool_type} extract ratio: {ratio}" + "\n")
+        fw.write(f"{tool_type} Average Levenshtein Distance: {class_average_edit_distance}" + "\n")
+        fw.write(f"{tool_type} Average BLEU Score: {class_average_bleu_score}" + "\n")
+        fw.write(f"{tool_type} Average Sim Score: {class_average_sim_score}" + "\n")
+
+        print(f"{tool_type} extract ratio: {ratio}")
+        print(f"{tool_type} Average Levenshtein Distance: {class_average_edit_distance}")
+        print(f"{tool_type} Average BLEU Score: {class_average_bleu_score}")
+        print(f"{tool_type} Average Sim Score: {class_average_sim_score}")
+        return self.score_dict
+
+    def summary_scores(self):
+        # Compute the overall averages
+        average_edit_distance = sum(self.edit_distances) / len(self.edit_distances) if self.edit_distances else 0
+        average_bleu_score = sum(self.bleu_scores) / len(self.bleu_scores) if self.bleu_scores else 0
+        average_sim_score = sum(self.sim_scores) / len(self.sim_scores) if self.sim_scores else 0
+        fw.write(f"Overall extract ratio: {len(self.score_dict) / self.annotation_cnt}" + "\n")
+        fw.write(f"Overall Average Levenshtein Distance: {average_edit_distance}" + "\n")
+        fw.write(f"Overall Average BLEU Score: {average_bleu_score}" + "\n")
+        fw.write(f"Overall Average Marker Score: {average_sim_score}" + "\n")
+        print("Overall extract ratio: ", len(self.score_dict) / self.annotation_cnt)
+        print(f"Overall Average Levenshtein Distance: {average_edit_distance}")
+        print(f"Overall Average BLEU Score: {average_bleu_score}")
+        print(f"Overall Average Marker Score: {average_sim_score}")
+        fw.close()
+
+    def calculate_similarity_total(self, tool_type, file_types, download_dir):
+        for file_type in file_types:
+            annotation = os.path.join(download_dir, file_type, "annotations", "cleaned")
+            actual = os.path.join(download_dir, file_type, tool_type, "cleaned")
+            self.calculate_similarity(annotation, actual, file_type)
+
+if __name__ == "__main__":
+    file_types = []
+    tool_type = args.tool_name
+    download_dir = args.download_dir
+    if args.document_types:
+        print("Selected types:", args.document_types)
+        for type_ in args.document_types:
+            file_types.append(type_)
+    else:
+        print("No types selected")
+    print(f"Type {file_types} is selected. Executing related operations...")
+    score = Scoring()
+    score.calculate_similarity_total(tool_type, file_types, download_dir)
+    score.summary_scores()

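markdown_calculate.py compares each cleaned annotation file with the tool's cleaned output of the same name, reporting the extract ratio, a normalized Levenshtein distance, BLEU, and the marker similarity score per document type and overall. A typical run, again following benchmark.py (the results path is illustrative):

`python tools/markdown_calculate.py --tool_name pdf-command --download_dir /home/quyuan/data --results /home/quyuan/data/result.json`

`--document_types` can optionally restrict scoring to a subset of types, e.g. `--document_types notes research_report`.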
+ 0 - 895
tools/ocr_badcase.py

@@ -1,895 +0,0 @@
-import json
-import pandas as pd
-import numpy as np
-from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
-import argparse
-import os
-from sklearn.metrics import classification_report
-from sklearn import metrics
-from datetime import datetime
-import boto3
-from botocore.exceptions import NoCredentialsError, ClientError
-from io import TextIOWrapper
-import zipfile
-
-
-
-def process_equations_and_blocks(json_data, is_standard):
-    """
-    处理JSON数据,提取公式、文本块、图片块和表格块的边界框和文本信息。
-    
-    参数:
-    - json_data: 列表,包含标准文档或测试文档的JSON数据。
-    - is_standard: 布尔值,指示处理的数据是否为标准文档。
-    
-    返回:
-    - 字典,包含处理后的数据。
-    """
-    equations_bboxs = {"inline": [], "interline": []}
-    equations_texts = {"inline": [], "interline": []}
-    dropped_bboxs = {"text": [], "image": [], "table": []}
-    dropped_tags = {"text": []}
-    para_texts = []
-    para_nums = []
-
-    for i in json_data:
-        mid_json = pd.DataFrame(i).iloc[:,:-1] if is_standard else pd.DataFrame(i)
-        page_data = {
-            "equations_bboxs_list": {"inline": [], "interline": []},
-            "equations_texts_list": {"inline": [], "interline": []},
-            "dropped_bboxs_list": {"text": [], "image": [], "table": []},
-            "dropped_tags_list": {"text": []},
-            "para_texts_list": [],
-            "para_nums_list": []
-        }
-
-        for eq_type in ["inline", "interline"]:
-            for equations in mid_json.loc[f"{eq_type}_equations", :]:
-                bboxs = [eq['bbox'] for eq in equations]
-                texts = [eq.get('latex_text' if is_standard else 'content', '') for eq in equations]
-                page_data["equations_bboxs_list"][eq_type].append(bboxs)
-                page_data["equations_texts_list"][eq_type].append(texts)
-        
-        equations_bboxs["inline"].append(page_data["equations_bboxs_list"]["inline"])
-        equations_bboxs["interline"].append(page_data["equations_bboxs_list"]["interline"])
-        equations_texts["inline"].append(page_data["equations_texts_list"]["inline"])
-        equations_texts["interline"].append(page_data["equations_texts_list"]["interline"])
-
-
-        # Extract the dropped text block info
-        for dropped_text_blocks in mid_json.loc['droped_text_block',:]:
-            bboxs, tags = [], []
-            for block in dropped_text_blocks:
-                bboxs.append(block['bbox'])
-                tags.append(block.get('tag', 'None'))
-            
-            page_data["dropped_bboxs_list"]["text"].append(bboxs)
-            page_data["dropped_tags_list"]["text"].append(tags)
-        
-        dropped_bboxs["text"].append(page_data["dropped_bboxs_list"]["text"])
-        dropped_tags["text"].append(page_data["dropped_tags_list"]["text"])
-
-
-      
-        # Handle the dropped image and table blocks together
-        for block_type in ['image', 'table']:
-            for blocks in mid_json.loc[f'droped_{block_type}_block', :]:
-                # For standard data, append the whole list of blocks directly
-                if is_standard:
-                    page_data["dropped_bboxs_list"][block_type].append(blocks)
-                # For test data, check the list is non-empty and extract each block's bbox
-                else:
-                    page_blocks = [block['bbox'] for block in blocks] if blocks else []
-                    page_data["dropped_bboxs_list"][block_type].append(page_blocks)
-            
-        # Append the current page's block bbox lists to the result dict
-        dropped_bboxs['image'].append(page_data["dropped_bboxs_list"]['image'])
-        dropped_bboxs['table'].append(page_data["dropped_bboxs_list"]['table'])
-        
-        
-        # Handle the paragraphs
-        for para_blocks in mid_json.loc['para_blocks', :]:
-            page_data["para_nums_list"].append(len(para_blocks))  # count the paragraphs
-
-            for para_block in para_blocks:
-                if is_standard:
-                    # Standard data: extract the text directly
-                    page_data["para_texts_list"].append(para_block['text'])
-                else:
-                    # Test data: may need to check whether 'content' exists
-                    if 'spans' in para_block[0] and para_block[0]['spans'][0]['type'] == 'text':
-                        page_data["para_texts_list"].append(para_block[0]['spans'][0].get('content', ''))
-            
-            
-        
-        para_texts.append(page_data["para_texts_list"])
-        para_nums.append(page_data["para_nums_list"])
-
-    return {
-        "equations_bboxs": equations_bboxs,
-        "equations_texts": equations_texts,
-        "dropped_bboxs": dropped_bboxs,
-        "dropped_tags": dropped_tags,
-        "para_texts": para_texts,
-        "para_nums": para_nums
-    }
-
-
-def bbox_match_indicator_general(test_bboxs_list, standard_bboxs_list):
-    """
-    计算边界框匹配指标,支持掉落的表格、图像和文本块。
-    此版本的函数专注于计算基于边界框的匹配指标,而不涉及标签匹配逻辑。
-    
-    参数:
-    - test_bboxs: 测试集的边界框列表,按页面组织。
-    - standard_bboxs: 标准集的边界框列表,按页面组织。
-
-    返回:
-    - 一个字典,包含准确度、精确度、召回率和F1分数。
-    """
-        # 如果两个列表都完全为空,返回0值指标
-    if all(len(page) == 0 for page in test_bboxs_list) and all(len(page) == 0 for page in standard_bboxs_list):
-        return {'accuracy': 0, 'precision': 0, 'recall': 0, 'f1_score': 0}
-    
-
-    matched_bbox = []
-    matched_standard_bbox = []
-
-    for test_page, standard_page in zip(test_bboxs_list, standard_bboxs_list):
-        test_page_bbox, standard_page_bbox = [], []
-        for standard_bbox in standard_page:
-            if len(standard_bbox) != 4:
-                continue
-            matched = False
-            for test_bbox in test_page:
-                if len(test_bbox) == 4 and bbox_offset(standard_bbox, test_bbox):
-                    matched = True
-                    break
-            test_page_bbox.append(int(matched))
-            standard_page_bbox.append(1)
-
-        # Post-process over-deletion cases, keeping the original logic unchanged
-        diff_num = len(test_page) + test_page_bbox.count(0) - len(standard_page)
-        if diff_num > 0:
-            test_page_bbox.extend([1] * diff_num)
-            standard_page_bbox.extend([0] * diff_num)
-
-        matched_bbox.extend(test_page_bbox)
-        matched_standard_bbox.extend(standard_page_bbox)
-
-    block_report = {
-        'accuracy': metrics.accuracy_score(matched_standard_bbox, matched_bbox),
-        'precision': metrics.precision_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'recall': metrics.recall_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'f1_score': metrics.f1_score(matched_standard_bbox, matched_bbox, zero_division=0)
-    }
-
-    return block_report
-
-
-def bbox_offset(b_t, b_s):
-    """
-    判断两个边界框(bounding box)之间的重叠程度是否符合给定的标准。
-    
-    参数:
-    - b_t: 测试文档中的边界框(bbox),格式为(x1, y1, x2, y2),
-           其中(x1, y1)是左上角的坐标,(x2, y2)是右下角的坐标。
-    - b_s: 标准文档中的边界框(bbox),格式同上。
-    
-    返回:
-    - True: 如果两个边界框的重叠面积与两个边界框合计面积的差的比例超过0.95,
-            表明它们足够接近。
-    - False: 否则,表示两个边界框不足够接近。
-    
-    注意:
-    - 函数首先计算两个bbox的交集区域,如果这个区域的面积相对于两个bbox的面积差非常大,
-      则认为这两个bbox足够接近。
-    - 如果交集区域的计算结果导致无效区域(比如宽度或高度为负值),或者分母为0(即两个bbox完全不重叠),
-      则函数会返回False。
-    """
-
-    # Extract the coordinates of the two bboxes
-    x1_t, y1_t, x2_t, y2_t = b_t
-    x1_s, y1_s, x2_s, y2_s = b_s
-  
-    # Compute the coordinates of the intersection region
-    x1 = max(x1_t, x1_s)
-    x2 = min(x2_t, x2_s)
-    y1 = max(y1_t, y1_s)
-    y2 = min(y2_t, y2_s)
-    
-    # If the intersection region is valid, compute its area
-    if x2 > x1 and y2 > y1:
-        area_overlap = (x2 - x1) * (y2 - y1)
-    else:
-        # Invalid intersection region: treat as no overlap
-        area_overlap = 0
-
-    # Union area of the two bboxes (subtract the overlap to avoid double counting)
-    area_t = (x2_t - x1_t) * (y2_t - y1_t) + (x2_s - x1_s) * (y2_s - y1_s) - area_overlap
-
-    # Check whether the overlap meets the criterion
-    if area_t - area_overlap == 0 or area_overlap / area_t > 0.95:
-        return True
-    else:
-        return False
-    
-
-def Levenshtein_Distance(str1, str2):
-    """
-    计算并返回两个字符串之间的Levenshtein编辑距离。
-    
-    参数:
-    - str1: 字符串,第一个比较字符串。
-    - str2: 字符串,第二个比较字符串。
-    
-    返回:
-    - int: str1和str2之间的Levenshtein距离。
-    
-    方法:
-    - 使用动态规划构建一个矩阵(matrix),其中matrix[i][j]表示str1的前i个字符和str2的前j个字符之间的Levenshtein距离。
-    - 矩阵的初始值设定为边界情况,即一个字符串与空字符串之间的距离。
-    - 遍历矩阵填充每个格子的值,根据字符是否相等选择插入、删除或替换操作的最小代价。
-    """
-    # 初始化矩阵,大小为(len(str1)+1) x (len(str2)+1),边界情况下的距离为i和j
-    matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]
-
-    # Iterate over every character of str1 and str2, updating the matrix
-    for i in range(1, len(str1) + 1):
-        for j in range(1, len(str2) + 1):
-            # Substitution cost is 0 if the current characters match, else 1
-            d = 0 if (str1[i - 1] == str2[j - 1]) else 1
-            # Minimum number of operations to transform str1[:i] into str2[:j]
-            matrix[i][j] = min(matrix[i - 1][j] + 1,  # deletion
-                               matrix[i][j - 1] + 1,  # insertion
-                               matrix[i - 1][j - 1] + d)  # substitution
-    # The bottom-right cell is the Levenshtein distance between str1 and str2
-    return matrix[len(str1)][len(str2)]
-
-
-def equations_indicator(test_equations_bboxs, standard_equations_bboxs, test_equations, standard_equations):
-    """
-    根据边界框匹配的方程计算编辑距离和BLEU分数。
-    
-    参数:
-    - test_equations_bboxs: 测试方程的边界框列表。
-    - standard_equations_bboxs: 标准方程的边界框列表。
-    - test_equations: 测试方程的列表。
-    - standard_equations: 标准方程的列表。
-    
-    返回:
-    - 一个元组,包含匹配方程的平均Levenshtein编辑距离和BLEU分数。
-    """
-    
-    # Initialize the lists of matched equations
-    test_match_equations = []
-    standard_match_equations = []
-
-    # Match equations based on bbox overlap
-    for index, (test_bbox, standard_bbox) in enumerate(zip(test_equations_bboxs, standard_equations_bboxs)):
-        if not (test_bbox and standard_bbox):  # skip if either list is empty
-            continue
-        for i, sb in enumerate(standard_bbox):
-            for j, tb in enumerate(test_bbox):
-                if bbox_offset(sb, tb):
-                    standard_match_equations.append(standard_equations[index][i])
-                    test_match_equations.append(test_equations[index][j])
-                    break  # stop after the first match
-
-    # Compute the edit distances with the Levenshtein distance
-    dis = [Levenshtein_Distance(a, b) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-    # Compute the BLEU scores with a smoothing function
-    sm_func = SmoothingFunction().method1
-    bleu = [sentence_bleu([a.split()], b.split(), smoothing_function=sm_func) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-
-    # Average the edit distances and BLEU scores, handling empty lists
-    equations_edit = np.mean(dis) if dis else 0.0
-    equations_bleu = np.mean(bleu) if bleu else 0.0
-
-    return equations_edit, equations_bleu
-
-
-def bbox_match_indicator_dropped_text_block(test_dropped_text_bboxs, standard_dropped_text_bboxs, standard_dropped_text_tag, test_dropped_text_tag):
-    """
-    计算丢弃文本块的边界框匹配相关指标,包括准确率、精确率、召回率和F1分数,
-    同时也计算文本块标签的匹配指标。
-
-    参数:
-    - test_dropped_text_bboxs: 测试集的丢弃文本块边界框列表
-    - standard_dropped_text_bboxs: 标准集的丢弃文本块边界框列表
-    - standard_dropped_text_tag: 标准集的丢弃文本块标签列表
-    - test_dropped_text_tag: 测试集的丢弃文本块标签列表
-
-    返回:
-    - 一个包含边界框匹配指标和文本块标签匹配指标的元组
-    """
-    test_text_bbox, standard_text_bbox = [], []
-    test_tag, standard_tag = [], []
-
-    for index, (test_page, standard_page) in enumerate(zip(test_dropped_text_bboxs, standard_dropped_text_bboxs)):
-        # Initialize the per-page result lists
-        test_page_tag, standard_page_tag = [], []
-        test_page_bbox, standard_page_bbox = [], []
-
-        for i, standard_bbox in enumerate(standard_page):
-            matched = False
-            for j, test_bbox in enumerate(test_page):
-                if bbox_offset(standard_bbox, test_bbox):
-                    # Match found: record the tag and the bbox match result
-                    matched = True
-                    test_page_tag.append(test_dropped_text_tag[index][j])
-                    test_page_bbox.append(1)
-                    break
-
-            if not matched:
-                # No match: record 'None' and a bbox miss
-                test_page_tag.append('None')
-                test_page_bbox.append(0)
-
-            # Standard bboxes and tags are always counted as matched
-            standard_page_tag.append(standard_dropped_text_tag[index][i])
-            standard_page_bbox.append(1)
-
-        # Handle possible over-deletion cases
-        handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox)
-
-        # Merge the current page's results into the overall results
-        test_tag.extend(test_page_tag)
-        standard_tag.extend(standard_page_tag)
-        test_text_bbox.extend(test_page_bbox)
-        standard_text_bbox.extend(standard_page_bbox)
-
-    # Compute the bbox matching metrics
-    text_block_report = {
-        'accuracy': metrics.accuracy_score(standard_text_bbox, test_text_bbox),
-        'precision': metrics.precision_score(standard_text_bbox, test_text_bbox, zero_division=0),
-        'recall': metrics.recall_score(standard_text_bbox, test_text_bbox, zero_division=0),
-        'f1_score': metrics.f1_score(standard_text_bbox, test_text_bbox, zero_division=0)
-    }
-
-    # Compute the tag matching metrics
-    text_block_tag_report = classification_report(y_true=standard_tag, y_pred=test_tag, labels=list(set(standard_tag) - {'None'}), output_dict=True, zero_division=0)
-    del text_block_tag_report["macro avg"]
-    del text_block_tag_report["weighted avg"]
-    
-    return text_block_report, text_block_tag_report
-
-def handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox):
-    """
-    处理多删情况,即测试页面的边界框或标签数量多于标准页面。
-    """
-    excess_count = len(test_page) + test_page_bbox.count(0) - len(standard_page_tag)
-    if excess_count > 0:
-        # Treat the surplus items as matched bboxes, but with tag 'None'
-        test_page_bbox.extend([1] * excess_count)
-        standard_page_bbox.extend([0] * excess_count)
-        test_page_tag.extend(['None'] * excess_count)
-        standard_page_tag.extend(['None'] * excess_count)
-
-
-def consolidate_data(test_data, standard_data, key_path):
-    """
-    Consolidates data from test and standard datasets based on the provided key path.
-    
-    :param test_data: Dictionary containing the test dataset.
-    :param standard_data: Dictionary containing the standard dataset.
-    :param key_path: List of keys leading to the desired data within the dictionaries.
-    :return: List containing all items from both test and standard data at the specified key path.
-    """
-    # Initialize an empty list to hold the consolidated data
-    overall_data_standard = []
-    overall_data_test = []
-    
-    # Helper function to recursively navigate through the dictionaries based on the key path
-    def extract_data(source_data, keys):
-        for key in keys[:-1]:
-            source_data = source_data.get(key, {})
-        return source_data.get(keys[-1], [])
-    
-    # Each entry is assumed to already be a list; extend the overall lists with its elements
-    for data in extract_data(standard_data, key_path):
-        overall_data_standard.extend(data)
-
-    for data in extract_data(test_data, key_path):
-        overall_data_test.extend(data)
-
-    return overall_data_standard, overall_data_test
-
-def overall_calculate_metrics(inner_merge, json_test, json_standard,standard_exist, test_exist):
-    """
-    计算整体的指标,包括准确率、精确率、召回率、F1值、平均编辑距离、平均BLEU得分、分段准确率、公式准确率、公式编辑距离、公式BLEU、丢弃文本准确率、丢弃文本标签准确率、丢弃图片准确率、丢弃表格准确率等。
-    
-    Args:
-        inner_merge (dict): 包含merge信息的字典,包括pass_label和id等信息。
-        json_test (dict): 测试集的json数据。
-        json_standard (dict): 标准集的json数据。
-        standard_exist (list): 标准集中存在的id列表。
-        test_exist (list): 测试集中存在的id列表。
-    
-    Returns:
-        dict: 包含整体指标值的字典。
-    
-    """
-
-    process_data_standard = process_equations_and_blocks(json_standard, is_standard=True)
-    process_data_test = process_equations_and_blocks(json_test, is_standard=False)
-
-
-    overall_report = {}
-    overall_report['accuracy'] = metrics.accuracy_score(standard_exist, test_exist)
-    overall_report['precision'] = metrics.precision_score(standard_exist, test_exist)
-    overall_report['recall'] = metrics.recall_score(standard_exist, test_exist)
-    overall_report['f1_score'] = metrics.f1_score(standard_exist, test_exist)
-
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    # Compute edit distance and BLEU score for entries whose pass_label is 'yes'
-    for idx,(a, b, id) in enumerate(zip(test_para_text, standard_para_text, ids_yes)):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[id] = Levenshtein_Distance(a, b)
-        pdf_bleu[id] = sentence_bleu([a1], b1)
-
-    overall_report['pdf间的平均编辑距离'] = np.mean(list(pdf_dis.values()))
-    overall_report['pdf间的平均bleu'] = np.mean(list(pdf_bleu.values()))
-
-    # Consolidate equations bboxs inline
-    overall_equations_bboxs_inline_standard,overall_equations_bboxs_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "inline"])
-
-    # Consolidate equations texts inline
-    overall_equations_texts_inline_standard,overall_equations_texts_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "inline"])
-
-    # Consolidate equations bboxs interline
-    overall_equations_bboxs_interline_standard,overall_equations_bboxs_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "interline"])
-
-    # Consolidate equations texts interline
-    overall_equations_texts_interline_standard,overall_equations_texts_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "interline"])
-
-    overall_dropped_bboxs_text_standard,overall_dropped_bboxs_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs","text"])
-
-    overall_dropped_tags_text_standard,overall_dropped_tags_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_tags","text"])
-
-    overall_dropped_bboxs_image_standard,overall_dropped_bboxs_image_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs","image"])
-
-
-    overall_dropped_bboxs_table_standard,overall_dropped_bboxs_table_test=consolidate_data(process_data_test, process_data_standard,["dropped_bboxs","table"])
-
-
-    para_nums_test = process_data_test['para_nums']
-    para_nums_standard=process_data_standard['para_nums']
-    overall_para_nums_standard = [item for sublist in para_nums_standard for item in (sublist if isinstance(sublist, list) else [sublist])]
-    overall_para_nums_test = [item for sublist in para_nums_test for item in (sublist if isinstance(sublist, list) else [sublist])]
-
-
-    test_para_num=np.array(overall_para_nums_test)
-    standard_para_num=np.array(overall_para_nums_standard)
-    acc_para=np.mean(test_para_num==standard_para_num)
-
-
-    overall_report['分段准确率'] = acc_para
-
-    # Inline equation accuracy, edit distance, and BLEU
-    overall_report['行内公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard)
-
-    overall_report['行内公式编辑距离'], overall_report['行内公式bleu'] = equations_indicator(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard,
-        overall_equations_texts_inline_test,
-        overall_equations_texts_inline_standard)
-
-    # Interline equation accuracy, edit distance, and BLEU
-    overall_report['行间公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard)
-
-    overall_report['行间公式编辑距离'], overall_report['行间公式bleu'] = equations_indicator(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard,
-        overall_equations_texts_interline_test,
-        overall_equations_texts_interline_standard)
-
-    # Dropped-text accuracy and dropped-text tag accuracy
-    overall_report['丢弃文本准确率'], overall_report['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-        overall_dropped_bboxs_text_test,
-        overall_dropped_bboxs_text_standard,
-        overall_dropped_tags_text_standard,
-        overall_dropped_tags_text_test)
-
-    # Dropped-image accuracy
-    overall_report['丢弃图片准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_image_test,
-        overall_dropped_bboxs_image_standard)
-
-    # Dropped-table accuracy
-    overall_report['丢弃表格准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_table_test,
-        overall_dropped_bboxs_table_standard)
-
-    return overall_report
-
-
-
-def calculate_metrics(inner_merge, json_test, json_standard, json_standard_origin):
-    """
-    计算指标
-    """
-    # Build a mapping from id to file_id
-    id_to_file_id_map = pd.Series(json_standard_origin.file_id.values, index=json_standard_origin.id).to_dict()
-
-    # Process the standard and the test data
-    process_data_standard = process_equations_and_blocks(json_standard, is_standard=True)
-    process_data_test = process_equations_and_blocks(json_test, is_standard=False)
-
-    # Select the entries in inner_merge whose pass_label is 'yes'
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    # Compute edit distance and BLEU score for entries whose pass_label is 'yes'
-    for idx, (a, b, id) in enumerate(zip(test_para_text, standard_para_text, ids_yes)):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[id] = Levenshtein_Distance(a, b)
-        pdf_bleu[id] = sentence_bleu([a1], b1)
-
-        
-    result_dict = {}
-    acc_para=[]
-
-    # Compute the remaining metrics for all entries
-    for index, id_value in enumerate(inner_merge['id'].tolist()):
-        result = {}
-        
-        # Add the file_id to the result
-        file_id = id_to_file_id_map.get(id_value, "Unknown")
-        result['file_id'] = file_id
-        
-
-        
-        # Record pdf_dis and pdf_bleu only for ids with pass_label 'yes'
-        if id_value in ids_yes:
-            result['pdf_dis'] = pdf_dis[id_value]
-            result['pdf_bleu'] = pdf_bleu[id_value]
-        
-        
-
-        # Compute the paragraph-splitting accuracy
-        single_test_para_num = np.array(process_data_test['para_nums'][index])
-        single_standard_para_num = np.array(process_data_standard['para_nums'][index])
-        acc_para.append(np.mean(single_test_para_num == single_standard_para_num))
-        
-        result['分段准确率'] = acc_para[index]
-    
-        # Inline equation accuracy, edit distance, and BLEU
-        result['行内公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index])
-        
-        result['行内公式编辑距离'], result['行内公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index],
-            process_data_test["equations_texts"]["inline"][index],
-            process_data_standard["equations_texts"]["inline"][index])
-
-        # Interline equation accuracy, edit distance, and BLEU
-        result['行间公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index])
-        
-        result['行间公式编辑距离'], result['行间公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index],
-            process_data_test["equations_texts"]["interline"][index],
-            process_data_standard["equations_texts"]["interline"][index])
-
-        # Dropped-text accuracy and dropped-text tag accuracy
-        result['丢弃文本准确率'], result['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-            process_data_test["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_tags"]["text"][index],
-            process_data_test["dropped_tags"]["text"][index])
-
-        # Dropped-image accuracy
-        result['丢弃图片准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["image"][index],
-            process_data_standard["dropped_bboxs"]["image"][index])
-
-        # Dropped-table accuracy
-        result['丢弃表格准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["table"][index],
-            process_data_standard["dropped_bboxs"]["table"][index])
-
-
-        # Store the result in result_dict
-        result_dict[id_value] = result
-
-    return result_dict
-
-def check_json_files_in_zip_exist(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    检查ZIP文件中是否存在指定的JSON文件
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        # List all the files in the ZIP archive
-        all_files_in_zip = z.namelist()
-        # Check that both the standard file and the test file are in the ZIP archive
-        if standard_json_path_in_zip not in all_files_in_zip or test_json_path_in_zip not in all_files_in_zip:
-            raise FileNotFoundError("One or both of the required JSON files are missing from the ZIP archive.")
-
-
-
-def read_json_files_from_streams(standard_file_stream, test_file_stream):
-    """
-    从文件流中读取JSON文件内容
-    """
-    pdf_json_standard = [json.loads(line) for line in standard_file_stream]
-    pdf_json_test = [json.loads(line) for line in test_file_stream]
-
-    json_standard_origin = pd.DataFrame(pdf_json_standard)
-    json_test_origin = pd.DataFrame(pdf_json_test)
-
-    return json_standard_origin, json_test_origin
-
-def read_json_files_from_zip(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    从ZIP文件中读取两个JSON文件并返回它们的DataFrame
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        with z.open(standard_json_path_in_zip) as standard_file_stream, \
-             z.open(test_json_path_in_zip) as test_file_stream:
-
-            standard_file_text_stream = TextIOWrapper(standard_file_stream, encoding='utf-8')
-            test_file_text_stream = TextIOWrapper(test_file_stream, encoding='utf-8')
-
-            json_standard_origin, json_test_origin = read_json_files_from_streams(
-                standard_file_text_stream, test_file_text_stream
-            )
-    
-    return json_standard_origin, json_test_origin
-
-
-def merge_json_data(json_test_df, json_standard_df):
-    """
-    基于ID合并测试和标准数据集,并返回合并后的数据及存在性检查结果。
-
-    参数:
-    - json_test_df: 测试数据的DataFrame。
-    - json_standard_df: 标准数据的DataFrame。
-
-    返回:
-    - inner_merge: 内部合并的DataFrame,包含匹配的数据行。
-    - standard_exist: 标准数据存在性的Series。
-    - test_exist: 测试数据存在性的Series。
-    """
-    test_data = json_test_df[['id', 'mid_json']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-    standard_data = json_standard_df[['id', 'mid_json', 'pass_label']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-
-    outer_merge = pd.merge(test_data, standard_data, on='id', how='outer')
-    outer_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    standard_exist = outer_merge.standard_mid_json.notnull()
-    test_exist = outer_merge.test_mid_json.notnull()
-
-    inner_merge = pd.merge(test_data, standard_data, on='id', how='inner')
-    inner_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    return inner_merge, standard_exist, test_exist
-
-def generate_filename(base_path):
-    """
-    生成带有当前时间戳的输出文件名。
-    参数:
-    - base_path: 基础路径和文件名前缀。
-    返回:
-    - 带有当前时间戳的完整输出文件名。
-    """
-    current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-    return f"{base_path}_{current_time}.json"
-
-def save_results(data_dict, file_path):
-    """
-    将数据字典保存为JSON文件至指定路径。
-    参数:
-    - data_dict: 包含数据的字典。
-    - file_path: 结果文件的保存路径,包括文件名。
-    """
-    with open(file_path, 'w', encoding='utf-8') as f:
-        json.dump(data_dict, f, ensure_ascii=False, indent=4)
-    print(f"结果已经保存到文件:{file_path}")
-
-
-def upload_to_s3(file_path, bucket_name, s3_directory, AWS_ACCESS_KEY, AWS_SECRET_KEY, END_POINT_URL):
-    """
-    上传文件到Amazon S3
-    """
-    # Create the S3 client
-    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, endpoint_url=END_POINT_URL)
-    try:
-        # Extract the filename from the file path
-        file_name = os.path.basename(file_path)
-        
-        # Build the S3 object key from s3_directory and file_name
-        s3_object_key = f"{s3_directory}/{file_name}"  # joined directly with a slash
-        
-        # Upload the file to S3
-        s3.upload_file(file_path, bucket_name, s3_object_key)
-        
-        print(f"文件 {file_path} 成功上传到S3存储桶 {bucket_name} 中的目录 {s3_directory},文件名为 {file_name}")
-    except FileNotFoundError:
-        print(f"文件 {file_path} 未找到,请检查文件路径是否正确。")
-    except NoCredentialsError:
-        print("无法找到AWS凭证,请确认您的AWS访问密钥和密钥ID是否正确。")
-    except ClientError as e:
-        print(f"上传文件时发生错误:{e}")
-
-
-def compare_edit_distance(json_file, overall_report):
-    with open(json_file, 'r',encoding='utf-8') as f:
-        json_data = json.load(f)
-    
-    json_edit_distance = json_data['pdf间的平均编辑距离']
-    
-    if overall_report['pdf间的平均编辑距离'] > json_edit_distance:
-        return 0
-    else:
-        return 1
-
-
-
-def main(standard_file, test_file, zip_file, overall_path, base_data_path, badcase_path=None, s3_bucket_name=None, s3_file_directory=None, 
-         aws_access_key=None, aws_secret_key=None, end_point_url=None):
-    """
-    主函数,执行整个评估流程。
-    
-    参数:
-    - standard_file: 标准文件的路径。
-    - test_file: 测试文件的路径。
-    - zip_file: 压缩包的路径的路径。
-    - badcase_path: badcase文件的基础路径和文件名前缀(可选)。
-    - overall_path: overall文件的基础路径和文件名前缀。
-    - base_data_path: 基础数据路径。
-    - s3_bucket_name: S3桶名称(可选)。
-    - s3_file_directory: S3上的文件保存目录(可选)。
-    - AWS_ACCESS_KEY, AWS_SECRET_KEY, END_POINT_URL: AWS访问凭证和端点URL(可选)。
-    """
-    # Check that the files exist
-    check_json_files_in_zip_exist(zip_file, standard_file, test_file)
-
-    # Read the JSON file contents
-    json_standard_origin, json_test_origin = read_json_files_from_zip(zip_file, standard_file, test_file)
-
-    # Merge the JSON data
-    inner_merge, standard_exist, test_exist = merge_json_data(json_test_origin, json_standard_origin)
-
-    # Compute the overall metrics
-    overall_report_dict = overall_calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], standard_exist, test_exist)
-
-    # Generate timestamped output filenames
-    if badcase_path:
-        badcase_file = generate_filename(badcase_path)
-        result_dict = calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], json_standard_origin)
-        save_results(result_dict, badcase_file)
-
-    overall_file = generate_filename(overall_path)
-    save_results(overall_report_dict, overall_file)
-
-    result = compare_edit_distance(base_data_path, overall_report_dict)
-
-    if all([s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url]):
-        try:
-            if badcase_path:
-                upload_to_s3(badcase_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-            upload_to_s3(overall_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-        except Exception as e:
-            print(f"上传到S3时发生错误: {e}")
-
-    print(result)
-    assert result == 1
-
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the whole evaluation pipeline.")
-    parser.add_argument('standard_file', type=str, help='Path to the standard file.')
-    parser.add_argument('test_file', type=str, help='Path to the test file.')
-    parser.add_argument('zip_file', type=str, help='Path to the zip archive.')
-    parser.add_argument('overall_path', type=str, help='Base path and filename prefix for the overall file.')
-    parser.add_argument('base_data_path', type=str, help='Base path and filename prefix for the baseline file.')
-    parser.add_argument('--badcase_path', type=str, default=None, help='Base path and filename prefix for the badcase file (optional).')
-    parser.add_argument('--s3_bucket_name', type=str, help='S3 bucket name.', default=None)
-    parser.add_argument('--s3_file_directory', type=str, help='Target directory on S3.', default=None)
-    parser.add_argument('--AWS_ACCESS_KEY', type=str, help='AWS access key.', default=None)
-    parser.add_argument('--AWS_SECRET_KEY', type=str, help='AWS secret key.', default=None)
-    parser.add_argument('--END_POINT_URL', type=str, help='AWS endpoint URL.', default=None)
-
-    args = parser.parse_args()
-
-    main(args.standard_file, args.test_file, args.zip_file, args.overall_path, args.base_data_path,
-         badcase_path=args.badcase_path, s3_bucket_name=args.s3_bucket_name, 
-         s3_file_directory=args.s3_file_directory, aws_access_key=args.AWS_ACCESS_KEY, 
-         aws_secret_key=args.AWS_SECRET_KEY, end_point_url=args.END_POINT_URL)
-

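Block matching in ocr_badcase.py hinges on bbox_offset: two boxes are treated as the same block when their intersection covers more than 95% of their union. A minimal standalone sketch of that criterion, with made-up example boxes:

```python
def almost_same_box(b_t, b_s, threshold=0.95):
    # Intersection of two (x1, y1, x2, y2) boxes
    x1, y1 = max(b_t[0], b_s[0]), max(b_t[1], b_s[1])
    x2, y2 = min(b_t[2], b_s[2]), min(b_t[3], b_s[3])
    overlap = (x2 - x1) * (y2 - y1) if x2 > x1 and y2 > y1 else 0
    # Union = sum of the two areas minus the overlap
    union = ((b_t[2] - b_t[0]) * (b_t[3] - b_t[1])
             + (b_s[2] - b_s[0]) * (b_s[3] - b_s[1]) - overlap)
    return union == overlap or overlap / union > threshold

print(almost_same_box((0, 0, 100, 100), (1, 1, 100, 100)))   # True, IoU ~ 0.98
print(almost_same_box((0, 0, 100, 100), (50, 0, 150, 100)))  # False, IoU ~ 0.33
```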
+ 0 - 895
tools/over_all_benchamark.py

@@ -1,895 +0,0 @@
-import json
-import pandas as pd
-import numpy as np
-from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
-import argparse
-import os
-from sklearn.metrics import classification_report
-from sklearn import metrics
-from datetime import datetime
-import boto3
-from botocore.exceptions import NoCredentialsError, ClientError
-from io import TextIOWrapper
-import zipfile
-
-
-
-def process_equations_and_blocks(json_data, is_standard):
-    """
-    Process the JSON data and extract bounding boxes and text for equations,
-    text blocks, image blocks and table blocks.
-    
-    Parameters:
-    - json_data: list containing the JSON data of the standard or test documents.
-    - is_standard: boolean flag; True when the data is the standard (ground-truth) document.
-    
-    Returns:
-    - A dictionary with the processed data.
-    """
-    equations_bboxs = {"inline": [], "interline": []}
-    equations_texts = {"inline": [], "interline": []}
-    dropped_bboxs = {"text": [], "image": [], "table": []}
-    dropped_tags = {"text": []}
-    para_texts = []
-    para_nums = []
-
-    for i in json_data:
-        mid_json = pd.DataFrame(i).iloc[:,:-1] if is_standard else pd.DataFrame(i)
-        page_data = {
-            "equations_bboxs_list": {"inline": [], "interline": []},
-            "equations_texts_list": {"inline": [], "interline": []},
-            "dropped_bboxs_list": {"text": [], "image": [], "table": []},
-            "dropped_tags_list": {"text": []},
-            "para_texts_list": [],
-            "para_nums_list": []
-        }
-
-        for eq_type in ["inline", "interline"]:
-            for equations in mid_json.loc[f"{eq_type}_equations", :]:
-                bboxs = [eq['bbox'] for eq in equations]
-                texts = [eq.get('latex_text' if is_standard else 'content', '') for eq in equations]
-                page_data["equations_bboxs_list"][eq_type].append(bboxs)
-                page_data["equations_texts_list"][eq_type].append(texts)
-        
-        equations_bboxs["inline"].append(page_data["equations_bboxs_list"]["inline"])
-        equations_bboxs["interline"].append(page_data["equations_bboxs_list"]["interline"])
-        equations_texts["inline"].append(page_data["equations_texts_list"]["inline"])
-        equations_texts["interline"].append(page_data["equations_texts_list"]["interline"])
-
-
-        # Extract information about dropped text blocks
-        for dropped_text_blocks in mid_json.loc['droped_text_block',:]:
-            bboxs, tags = [], []
-            for block in dropped_text_blocks:
-                bboxs.append(block['bbox'])
-                tags.append(block.get('tag', 'None'))
-            
-            page_data["dropped_bboxs_list"]["text"].append(bboxs)
-            page_data["dropped_tags_list"]["text"].append(tags)
-        
-        dropped_bboxs["text"].append(page_data["dropped_bboxs_list"]["text"])
-        dropped_tags["text"].append(page_data["dropped_tags_list"]["text"])
-
-
-      
-        # Process the dropped image blocks and table blocks together
-        for block_type in ['image', 'table']:
-            for blocks in mid_json.loc[f'droped_{block_type}_block', :]:
-                # For standard data, append the whole list of blocks directly
-                if is_standard:
-                    page_data["dropped_bboxs_list"][block_type].append(blocks)
-                # For test data, check the list is non-empty and extract each block's bounding box
-                else:
-                    page_blocks = [block['bbox'] for block in blocks] if blocks else []
-                    page_data["dropped_bboxs_list"][block_type].append(page_blocks)
-
-        # Append the per-page bounding-box lists to the result dictionary
-        dropped_bboxs['image'].append(page_data["dropped_bboxs_list"]['image'])
-        dropped_bboxs['table'].append(page_data["dropped_bboxs_list"]['table'])
-        
-        
-        # Process paragraphs
-        for para_blocks in mid_json.loc['para_blocks', :]:
-            page_data["para_nums_list"].append(len(para_blocks))  # count the paragraphs
-
-            for para_block in para_blocks:
-                if is_standard:
-                    # Standard data: the text can be read directly
-                    page_data["para_texts_list"].append(para_block['text'])
-                else:
-                    # Test data: 'content' may be missing, so check before reading it
-                    if 'spans' in para_block[0] and para_block[0]['spans'][0]['type'] == 'text':
-                        page_data["para_texts_list"].append(para_block[0]['spans'][0].get('content', ''))
-            
-            
-        
-        para_texts.append(page_data["para_texts_list"])
-        para_nums.append(page_data["para_nums_list"])
-
-    return {
-        "equations_bboxs": equations_bboxs,
-        "equations_texts": equations_texts,
-        "dropped_bboxs": dropped_bboxs,
-        "dropped_tags": dropped_tags,
-        "para_texts": para_texts,
-        "para_nums": para_nums
-    }
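For orientation, every list returned here is nested per document, then per page: `equations_bboxs["inline"][d][p]` holds the inline-equation boxes on page `p` of document `d`. A minimal sketch with invented values:

```python
# Invented illustration of the returned shape (not real data):
processed = {
    "equations_bboxs": {"inline": [[[[10, 20, 110, 40]], []]]},  # 1 doc, 2 pages
    "para_nums": [[3, 1]],  # doc 0: 3 paragraphs on page 0, 1 on page 1
}
print(processed["equations_bboxs"]["inline"][0][0])  # [[10, 20, 110, 40]]
```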
-
-
-def bbox_offset(b_t, b_s):
-    """
-    Decide whether two bounding boxes overlap closely enough to count as a match.
-    
-    Parameters:
-    - b_t: bounding box from the test document, in the format (x1, y1, x2, y2),
-           where (x1, y1) is the top-left corner and (x2, y2) the bottom-right corner.
-    - b_s: bounding box from the standard document, in the same format.
-    
-    Returns:
-    - True: if the intersection area divided by the union area (IoU) exceeds 0.95,
-            i.e. the two boxes are close enough.
-    - False: otherwise.
-    
-    Notes:
-    - The function first computes the intersection of the two boxes; if that area is
-      very large relative to the union, the boxes are considered close enough.
-    - If the intersection is invalid (negative width or height) or the union is 0
-      (the boxes do not overlap at all), the function returns False.
-    """
-
-    # Unpack the coordinates of the two boxes
-    x1_t, y1_t, x2_t, y2_t = b_t
-    x1_s, y1_s, x2_s, y2_s = b_s
-
-    # Coordinates of the intersection of the two boxes
-    x1 = max(x1_t, x1_s)
-    x2 = min(x2_t, x2_s)
-    y1 = max(y1_t, y1_s)
-    y2 = min(y2_t, y2_s)
-    
-    # If the intersection is valid, compute its area
-    if x2 > x1 and y2 > y1:
-        area_overlap = (x2 - x1) * (y2 - y1)
-    else:
-        # Invalid intersection: treat as no overlap
-        area_overlap = 0
-
-    # Union area of the two boxes (subtract the overlap so it is not counted twice)
-    area_t = (x2_t - x1_t) * (y2_t - y1_t) + (x2_s - x1_s) * (y2_s - y1_s) - area_overlap
-
-    # Check whether the overlap meets the threshold
-    if area_t - area_overlap == 0 or area_overlap / area_t > 0.95:
-        return True
-    else:
-        return False
-    
-
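For intuition, a quick check of the 0.95 threshold (coordinates invented here):

```python
# Nearly identical boxes: IoU = 4000/4100 ≈ 0.976 > 0.95, so they match.
print(bbox_offset((10, 10, 110, 50), (10, 10, 110, 51)))  # True
# Disjoint boxes: zero overlap, so they do not match.
print(bbox_offset((0, 0, 10, 10), (20, 20, 30, 30)))      # False
```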
-def Levenshtein_Distance(str1, str2):
-    """
-    Compute and return the Levenshtein edit distance between two strings.
-    
-    Parameters:
-    - str1: the first string to compare.
-    - str2: the second string to compare.
-    
-    Returns:
-    - int: the Levenshtein distance between str1 and str2.
-    
-    Method:
-    - Dynamic programming over a matrix where matrix[i][j] is the Levenshtein
-      distance between the first i characters of str1 and the first j characters of str2.
-    - The matrix is initialized with the boundary cases, i.e. the distance between
-      a string and the empty string.
-    - Each cell is then filled with the minimum cost of an insertion, a deletion,
-      or a substitution, depending on whether the current characters match.
-    """
-    # Initialize the matrix of size (len(str1)+1) x (len(str2)+1); boundary distances are i and j
-    matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]
-
-    # Walk over every character of str1 and str2, updating the matrix
-    for i in range(1, len(str1) + 1):
-        for j in range(1, len(str2) + 1):
-            # Substitution costs 0 if the characters match, 1 otherwise
-            d = 0 if (str1[i - 1] == str2[j - 1]) else 1
-            # Minimum number of operations to turn str1[:i] into str2[:j]
-            matrix[i][j] = min(matrix[i - 1][j] + 1,  # deletion
-                               matrix[i][j - 1] + 1,  # insertion
-                               matrix[i - 1][j - 1] + d)  # substitution
-    # The bottom-right cell is the Levenshtein distance between str1 and str2
-    return matrix[len(str1)][len(str2)]
-
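A worked example of the recurrence:

```python
# "kitten" -> "sitting" takes 3 edits: substitute k->s, substitute e->i, insert g.
print(Levenshtein_Distance("kitten", "sitting"))  # 3
```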
-
-def equations_indicator(test_equations_bboxs, standard_equations_bboxs, test_equations, standard_equations):
-    """
-    Compute the edit distance and BLEU score for equations matched by bounding box.
-    
-    Parameters:
-    - test_equations_bboxs: list of bounding boxes for the test equations.
-    - standard_equations_bboxs: list of bounding boxes for the standard equations.
-    - test_equations: list of test equations.
-    - standard_equations: list of standard equations.
-    
-    Returns:
-    - A tuple with the average Levenshtein edit distance and BLEU score of the matched equations.
-    """
-    
-    # Initialize the lists of matched equations
-    test_match_equations = []
-    standard_match_equations = []
-
-    # Match equations based on bounding-box overlap
-    for index, (test_bbox, standard_bbox) in enumerate(zip(test_equations_bboxs, standard_equations_bboxs)):
-        if not (test_bbox and standard_bbox):  # skip if either list is empty
-            continue
-        for i, sb in enumerate(standard_bbox):
-            for j, tb in enumerate(test_bbox):
-                if bbox_offset(sb, tb):
-                    standard_match_equations.append(standard_equations[index][i])
-                    test_match_equations.append(test_equations[index][j])
-                    break  # stop at the first match
-
-    # Compute the Levenshtein edit distance for each matched pair
-    dis = [Levenshtein_Distance(a, b) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-    # Compute the BLEU score with a smoothing function
-    sm_func = SmoothingFunction().method1
-    bleu = [sentence_bleu([a.split()], b.split(), smoothing_function=sm_func) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-
-    # Average the edit distances and BLEU scores, handling empty lists
-    equations_edit = np.mean(dis) if dis else 0.0
-    equations_bleu = np.mean(bleu) if bleu else 0.0
-
-    return equations_edit, equations_bleu
-
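A minimal sketch of the matching flow, with invented one-page inputs:

```python
test_bboxs = [[[10, 10, 110, 50]]]   # one page, one test equation box
std_bboxs = [[[10, 10, 110, 51]]]    # overlapping standard box (IoU > 0.95)
test_eqs = [["E = m c^2"]]
std_eqs = [["E = m c^{2}"]]
edit, bleu = equations_indicator(test_bboxs, std_bboxs, test_eqs, std_eqs)
print(edit)  # 2.0 - two character edits between the matched strings
```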
-
-
-def bbox_match_indicator_general(test_bboxs_list, standard_bboxs_list):
-    """
-    Compute bounding-box match metrics for dropped tables, images and text blocks.
-    This version focuses on the bounding-box match itself and does not involve tag matching.
-    
-    Parameters:
-    - test_bboxs_list: list of bounding boxes from the test set, organized per page.
-    - standard_bboxs_list: list of bounding boxes from the standard set, organized per page.
-
-    Returns:
-    - A dictionary with accuracy, precision, recall and F1 score.
-    """
-    # If both lists are completely empty, return all-zero metrics
-    if all(len(page) == 0 for page in test_bboxs_list) and all(len(page) == 0 for page in standard_bboxs_list):
-        return {'accuracy': 0, 'precision': 0, 'recall': 0, 'f1_score': 0}
-    
-
-    matched_bbox = []
-    matched_standard_bbox = []
-
-    for test_page, standard_page in zip(test_bboxs_list, standard_bboxs_list):
-        test_page_bbox, standard_page_bbox = [], []
-        for standard_bbox in standard_page:
-            if len(standard_bbox) != 4:
-                continue
-            matched = False
-            for test_bbox in test_page:
-                if len(test_bbox) == 4 and bbox_offset(standard_bbox, test_bbox):
-                    matched = True
-                    break
-            test_page_bbox.append(int(matched))
-            standard_page_bbox.append(1)
-
-        # Post-process the over-deletion case, keeping the original logic unchanged
-        diff_num = len(test_page) + test_page_bbox.count(0) - len(standard_page)
-        if diff_num > 0:
-            test_page_bbox.extend([1] * diff_num)
-            standard_page_bbox.extend([0] * diff_num)
-
-        matched_bbox.extend(test_page_bbox)
-        matched_standard_bbox.extend(standard_page_bbox)
-
-    block_report = {
-        'accuracy': metrics.accuracy_score(matched_standard_bbox, matched_bbox),
-        'precision': metrics.precision_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'recall': metrics.recall_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'f1_score': metrics.f1_score(matched_standard_bbox, matched_bbox, zero_division=0)
-    }
-
-    return block_report
-
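A hedged example with invented pages: the first box matches, the second is missed by the test set:

```python
test_pages = [[[10, 10, 110, 50]], []]
standard_pages = [[[10, 10, 110, 51]], [[0, 0, 50, 20]]]
print(bbox_match_indicator_general(test_pages, standard_pages))
# {'accuracy': 0.5, 'precision': 1.0, 'recall': 0.5, 'f1_score': 0.666...}
```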
-
-def bbox_match_indicator_dropped_text_block(test_dropped_text_bboxs, standard_dropped_text_bboxs, standard_dropped_text_tag, test_dropped_text_tag):
-    """
-    Compute bounding-box match metrics for dropped text blocks (accuracy, precision,
-    recall and F1 score), together with match metrics for the text-block tags.
-
-    Parameters:
-    - test_dropped_text_bboxs: list of dropped text-block bounding boxes from the test set
-    - standard_dropped_text_bboxs: list of dropped text-block bounding boxes from the standard set
-    - standard_dropped_text_tag: list of dropped text-block tags from the standard set
-    - test_dropped_text_tag: list of dropped text-block tags from the test set
-
-    Returns:
-    - A tuple with the bounding-box match metrics and the text-block tag match metrics
-    """
-    test_text_bbox, standard_text_bbox = [], []
-    test_tag, standard_tag = [], []
-
-    for index, (test_page, standard_page) in enumerate(zip(test_dropped_text_bboxs, standard_dropped_text_bboxs)):
-        # Initialize the per-page result lists
-        test_page_tag, standard_page_tag = [], []
-        test_page_bbox, standard_page_bbox = [], []
-
-        for i, standard_bbox in enumerate(standard_page):
-            matched = False
-            for j, test_bbox in enumerate(test_page):
-                if bbox_offset(standard_bbox, test_bbox):
-                    # Match found: record the tag and the bounding-box match
-                    matched = True
-                    test_page_tag.append(test_dropped_text_tag[index][j])
-                    test_page_bbox.append(1)
-                    break
-
-            if not matched:
-                # No match: record 'None' and a missed bounding box
-                test_page_tag.append('None')
-                test_page_bbox.append(0)
-
-            # Standard bounding boxes and tags always count as matched
-            standard_page_tag.append(standard_dropped_text_tag[index][i])
-            standard_page_bbox.append(1)
-
-        # Handle the possible over-deletion case
-        handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox)
-
-        # Merge the per-page results into the overall results
-        test_tag.extend(test_page_tag)
-        standard_tag.extend(standard_page_tag)
-        test_text_bbox.extend(test_page_bbox)
-        standard_text_bbox.extend(standard_page_bbox)
-
-    # Compute and return the match metrics
-    text_block_report = {
-        'accuracy': metrics.accuracy_score(standard_text_bbox, test_text_bbox),
-        'precision': metrics.precision_score(standard_text_bbox, test_text_bbox, zero_division=0),
-        'recall': metrics.recall_score(standard_text_bbox, test_text_bbox, zero_division=0),
-        'f1_score': metrics.f1_score(standard_text_bbox, test_text_bbox, zero_division=0)
-    }
-
-    # Compute and return the tag match metrics
-    text_block_tag_report = classification_report(y_true=standard_tag, y_pred=test_tag, labels=list(set(standard_tag) - {'None'}), output_dict=True, zero_division=0)
-    del text_block_tag_report["macro avg"]
-    del text_block_tag_report["weighted avg"]
-    
-    return text_block_report, text_block_tag_report
-
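A single-page sketch (tags and coordinates invented): one dropped block, matched with the right position and tag:

```python
bbox_rep, tag_rep = bbox_match_indicator_dropped_text_block(
    [[[10, 10, 110, 50]]],   # test bounding boxes
    [[[10, 10, 110, 51]]],   # standard bounding boxes
    [['footnote']],          # standard tags
    [['footnote']])          # test tags
print(bbox_rep['recall'], tag_rep['footnote']['recall'])  # 1.0 1.0
```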
-def handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox):
-    """
-    Handle the over-deletion case, i.e. the test page has more bounding boxes or tags than the standard page.
-    """
-    excess_count = len(test_page) + test_page_bbox.count(0) - len(standard_page_tag)
-    if excess_count > 0:
-        # Treat the extra items as correctly matched bounding boxes, but with tag 'None'
-        test_page_bbox.extend([1] * excess_count)
-        standard_page_bbox.extend([0] * excess_count)
-        test_page_tag.extend(['None'] * excess_count)
-        standard_page_tag.extend(['None'] * excess_count)
-
-
-
-
-
-
-def consolidate_data(test_data, standard_data, key_path):
-    """
-    Consolidates data from test and standard datasets based on the provided key path.
-    
-    :param test_data: Dictionary containing the test dataset.
-    :param standard_data: Dictionary containing the standard dataset.
-    :param key_path: List of keys leading to the desired data within the dictionaries.
-    :return: Two lists (standard first, then test) with all items found at the specified key path.
-    """
-    # Initialize an empty list to hold the consolidated data
-    overall_data_standard = []
-    overall_data_test = []
-    
-    # Helper function to recursively navigate through the dictionaries based on the key path
-    def extract_data(source_data, keys):
-        for key in keys[:-1]:
-            source_data = source_data.get(key, {})
-        return source_data.get(keys[-1], [])
-    
-    for data in extract_data(standard_data, key_path):
-        # Each per-document entry is already a list, so extend the overall list with its items
-        overall_data_standard.extend(data)
-    
-    for data in extract_data(test_data, key_path):
-        overall_data_test.extend(data)
-    
-    return overall_data_standard, overall_data_test
-
-def overall_calculate_metrics(inner_merge, json_test, json_standard,standard_exist, test_exist):
-    """
-    Compute the overall metrics: accuracy, precision, recall, F1 score, average edit
-    distance, average BLEU score, paragraph-split accuracy, equation accuracy, equation
-    edit distance and BLEU, dropped-text accuracy, dropped-text tag accuracy,
-    dropped-image accuracy, dropped-table accuracy, and so on.
-    
-    Args:
-        inner_merge (dict): merge information, including pass_label and id.
-        json_test (dict): JSON data of the test set.
-        json_standard (dict): JSON data of the standard set.
-        standard_exist (list): list of ids present in the standard set.
-        test_exist (list): list of ids present in the test set.
-    
-    Returns:
-        dict: the overall metric values.
-    """
-
-    process_data_standard = process_equations_and_blocks(json_standard, is_standard=True)
-    process_data_test = process_equations_and_blocks(json_test, is_standard=False)
-
-
-    overall_report = {}
-    overall_report['accuracy'] = metrics.accuracy_score(standard_exist, test_exist)
-    overall_report['precision'] = metrics.precision_score(standard_exist, test_exist)
-    overall_report['recall'] = metrics.recall_score(standard_exist, test_exist)
-    overall_report['f1_score'] = metrics.f1_score(standard_exist, test_exist)
-
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    # Compute edit distance and BLEU score for the pass_label == 'yes' data
-    for idx,(a, b, id) in enumerate(zip(test_para_text, standard_para_text, ids_yes)):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[id] = Levenshtein_Distance(a, b)
-        pdf_bleu[id] = sentence_bleu([a1], b1)
-
-    overall_report['pdf间的平均编辑距离'] = np.mean(list(pdf_dis.values()))
-    overall_report['pdf间的平均bleu'] = np.mean(list(pdf_bleu.values()))
-
-    # Consolidate equations bboxs inline
-    overall_equations_bboxs_inline_standard,overall_equations_bboxs_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "inline"])
-
-    # Consolidate equations texts inline
-    overall_equations_texts_inline_standard,overall_equations_texts_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "inline"])
-
-    # Consolidate equations bboxs interline
-    overall_equations_bboxs_interline_standard,overall_equations_bboxs_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "interline"])
-
-    # Consolidate equations texts interline
-    overall_equations_texts_interline_standard,overall_equations_texts_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "interline"])
-
-    overall_dropped_bboxs_text_standard,overall_dropped_bboxs_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs","text"])
-
-    overall_dropped_tags_text_standard,overall_dropped_tags_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_tags","text"])
-
-    overall_dropped_bboxs_image_standard,overall_dropped_bboxs_image_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs","image"])
-
-
-    overall_dropped_bboxs_table_standard,overall_dropped_bboxs_table_test=consolidate_data(process_data_test, process_data_standard,["dropped_bboxs","table"])
-
-
-    para_nums_test = process_data_test['para_nums']
-    para_nums_standard = process_data_standard['para_nums']
-    overall_para_nums_standard = [item for sublist in para_nums_standard for item in (sublist if isinstance(sublist, list) else [sublist])]
-    overall_para_nums_test = [item for sublist in para_nums_test for item in (sublist if isinstance(sublist, list) else [sublist])]
-
-
-    test_para_num = np.array(overall_para_nums_test)
-    standard_para_num = np.array(overall_para_nums_standard)
-    acc_para = np.mean(test_para_num == standard_para_num)
-
-
-    overall_report['分段准确率'] = acc_para
-
-    # Inline equation accuracy, edit distance and BLEU
-    overall_report['行内公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard)
-
-    overall_report['行内公式编辑距离'], overall_report['行内公式bleu'] = equations_indicator(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard,
-        overall_equations_texts_inline_test,
-        overall_equations_texts_inline_standard)
-
-    # Interline equation accuracy, edit distance and BLEU
-    overall_report['行间公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard)
-
-    overall_report['行间公式编辑距离'], overall_report['行间公式bleu'] = equations_indicator(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard,
-        overall_equations_texts_interline_test,
-        overall_equations_texts_interline_standard)
-
-    # Dropped-text accuracy and dropped-text tag accuracy
-    overall_report['丢弃文本准确率'], overall_report['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-        overall_dropped_bboxs_text_test,
-        overall_dropped_bboxs_text_standard,
-        overall_dropped_tags_text_standard,
-        overall_dropped_tags_text_test)
-
-    # Dropped-image accuracy
-    overall_report['丢弃图片准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_image_test,
-        overall_dropped_bboxs_image_standard)
-
-    # Dropped-table accuracy
-    overall_report['丢弃表格准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_table_test,
-        overall_dropped_bboxs_table_standard)
-
-    return overall_report
-
-
-
-def calculate_metrics(inner_merge, json_test, json_standard, json_standard_origin):
-    """
-    Compute the per-document metrics.
-    """
-    # Build a mapping from ID to file_id
-    id_to_file_id_map = pd.Series(json_standard_origin.file_id.values, index=json_standard_origin.id).to_dict()
-
-    # Process the standard and test data
-    process_data_standard = process_equations_and_blocks(json_standard, is_standard=True)
-    process_data_test = process_equations_and_blocks(json_test, is_standard=False)
-
-    # Select the rows of inner_merge whose pass_label is 'yes'
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    # Compute edit distance and BLEU score for the pass_label == 'yes' data
-    for idx, (a, b, id) in enumerate(zip(test_para_text, standard_para_text, ids_yes)):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[id] = Levenshtein_Distance(a, b)
-        pdf_bleu[id] = sentence_bleu([a1], b1)
-
-        
-    result_dict = {}
-    acc_para = []
-
-    # Compute the remaining metrics for every document
-    for index, id_value in enumerate(inner_merge['id'].tolist()):
-        result = {}
-        
-        # Add the file_id to the result
-        file_id = id_to_file_id_map.get(id_value, "Unknown")
-        result['file_id'] = file_id
-        
-
-        
-        # Decide from the id whether pdf_dis and pdf_bleu should be reported
-        if id_value in ids_yes:
-            result['pdf_dis'] = pdf_dis[id_value]
-            result['pdf_bleu'] = pdf_bleu[id_value]
-        
-        
-
-        # Compute the paragraph-split accuracy
-        single_test_para_num = np.array(process_data_test['para_nums'][index])
-        single_standard_para_num = np.array(process_data_standard['para_nums'][index])
-        acc_para.append(np.mean(single_test_para_num == single_standard_para_num))
-        
-        result['分段准确率'] = acc_para[index]
-    
-        # Inline equation accuracy, edit distance and BLEU
-        result['行内公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index])
-        
-        result['行内公式编辑距离'], result['行内公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index],
-            process_data_test["equations_texts"]["inline"][index],
-            process_data_standard["equations_texts"]["inline"][index])
-
-        # Interline equation accuracy, edit distance and BLEU
-        result['行间公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index])
-        
-        result['行间公式编辑距离'], result['行间公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index],
-            process_data_test["equations_texts"]["interline"][index],
-            process_data_standard["equations_texts"]["interline"][index])
-
-        # Dropped-text accuracy and dropped-text tag accuracy
-        result['丢弃文本准确率'], result['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-            process_data_test["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_tags"]["text"][index],
-            process_data_test["dropped_tags"]["text"][index])
-
-        # Dropped-image accuracy
-        result['丢弃图片准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["image"][index],
-            process_data_standard["dropped_bboxs"]["image"][index])
-
-        # Dropped-table accuracy
-        result['丢弃表格准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["table"][index],
-            process_data_standard["dropped_bboxs"]["table"][index])
-
-
-        # Store the result in result_dict
-        result_dict[id_value] = result
-
-    return result_dict
-
-def check_json_files_in_zip_exist(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    Check that the specified JSON files exist inside the ZIP archive
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        # List every file inside the ZIP archive
-        all_files_in_zip = z.namelist()
-        # Check that both the standard file and the test file are in the ZIP archive
-        if standard_json_path_in_zip not in all_files_in_zip or test_json_path_in_zip not in all_files_in_zip:
-            raise FileNotFoundError("One or both of the required JSON files are missing from the ZIP archive.")
-
-
-
-def read_json_files_from_streams(standard_file_stream, test_file_stream):
-    """
-    Read JSON content from the given file streams
-    """
-    pdf_json_standard = [json.loads(line) for line in standard_file_stream]
-    pdf_json_test = [json.loads(line) for line in test_file_stream]
-
-    json_standard_origin = pd.DataFrame(pdf_json_standard)
-    json_test_origin = pd.DataFrame(pdf_json_test)
-
-    return json_standard_origin, json_test_origin
-
-def read_json_files_from_zip(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    Read the two JSON files from the ZIP archive and return their DataFrames
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        with z.open(standard_json_path_in_zip) as standard_file_stream, \
-             z.open(test_json_path_in_zip) as test_file_stream:
-
-            standard_file_text_stream = TextIOWrapper(standard_file_stream, encoding='utf-8')
-            test_file_text_stream = TextIOWrapper(test_file_stream, encoding='utf-8')
-
-            json_standard_origin, json_test_origin = read_json_files_from_streams(
-                standard_file_text_stream, test_file_text_stream
-            )
-    
-    return json_standard_origin, json_test_origin
-
-
-def merge_json_data(json_test_df, json_standard_df):
-    """
-    Merge the test and standard datasets on ID, returning the merged data and presence checks.
-
-    Parameters:
-    - json_test_df: DataFrame with the test data.
-    - json_standard_df: DataFrame with the standard data.
-
-    Returns:
-    - inner_merge: inner-merged DataFrame containing the matching rows.
-    - standard_exist: Series flagging presence in the standard data.
-    - test_exist: Series flagging presence in the test data.
-    """
-    test_data = json_test_df[['id', 'mid_json']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-    standard_data = json_standard_df[['id', 'mid_json', 'pass_label']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-
-    outer_merge = pd.merge(test_data, standard_data, on='id', how='outer')
-    outer_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    standard_exist = outer_merge.standard_mid_json.notnull()
-    test_exist = outer_merge.test_mid_json.notnull()
-
-    inner_merge = pd.merge(test_data, standard_data, on='id', how='inner')
-    inner_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    return inner_merge, standard_exist, test_exist
-
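A toy sketch of the merge semantics (frames invented): only ids present in both sets survive into inner_merge, while the exist flags come from the outer join:

```python
import pandas as pd

test_df = pd.DataFrame({'id': [1, 2], 'mid_json': [[], []]})
std_df = pd.DataFrame({'id': [2, 3], 'mid_json': [[], []], 'pass_label': ['yes', 'no']})
inner, std_exist, test_exist = merge_json_data(test_df, std_df)
print(inner['id'].tolist())  # [2]
```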
-def generate_filename(base_path):
-    """
-    Generate an output filename carrying the current timestamp.
-    Parameters:
-    - base_path: base path and filename prefix.
-    Returns:
-    - The full output filename with the current timestamp.
-    """
-    current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-    return f"{base_path}_{current_time}.json"
-
-def save_results(data_dict, file_path):
-    """
-    Save a data dictionary as a JSON file at the given path.
-    Parameters:
-    - data_dict: dictionary with the data.
-    - file_path: destination path for the result file, including the filename.
-    """
-    with open(file_path, 'w', encoding='utf-8') as f:
-        json.dump(data_dict, f, ensure_ascii=False, indent=4)
-    print(f"Results saved to file: {file_path}")
-
-
-def upload_to_s3(file_path, bucket_name, s3_directory, AWS_ACCESS_KEY, AWS_SECRET_KEY, END_POINT_URL):
-    """
-    Upload a file to Amazon S3
-    """
-    # Create the S3 client
-    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, endpoint_url=END_POINT_URL)
-    try:
-        # Extract the filename from the file path
-        file_name = os.path.basename(file_path)
-        
-        # Build the S3 object key by joining s3_directory and file_name
-        s3_object_key = f"{s3_directory}/{file_name}"  # joined with a plain slash
-        
-        # Upload the file to S3
-        s3.upload_file(file_path, bucket_name, s3_object_key)
-        
-        print(f"File {file_path} was successfully uploaded to S3 bucket {bucket_name}, directory {s3_directory}, as {file_name}")
-    except FileNotFoundError:
-        print(f"File {file_path} was not found; please check that the path is correct.")
-    except NoCredentialsError:
-        print("AWS credentials not found; please check your access key ID and secret key.")
-    except ClientError as e:
-        print(f"Error while uploading the file: {e}")
-
-
-
-
-
-def compare_edit_distance(json_file, overall_report):
-    with open(json_file, 'r',encoding='utf-8') as f:
-        json_data = json.load(f)
-    
-    json_edit_distance = json_data['pdf间的平均编辑距离']
-    
-    if overall_report['pdf间的平均编辑距离'] > json_edit_distance:
-        return 0
-    else:
-        return 1
-
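This is the regression gate that main() asserts on: 1 means the new run's average edit distance did not exceed the baseline. A hedged sketch with an invented baseline file:

```python
import json

with open('baseline.json', 'w', encoding='utf-8') as f:
    json.dump({'pdf间的平均编辑距离': 133.1}, f, ensure_ascii=False)
print(compare_edit_distance('baseline.json', {'pdf间的平均编辑距离': 120.0}))  # 1
```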
-
-
-def main(standard_file, test_file, zip_file, overall_path, base_data_path, badcase_path=None, s3_bucket_name=None, s3_file_directory=None, 
-         aws_access_key=None, aws_secret_key=None, end_point_url=None):
-    """
-    Main entry point: runs the full evaluation pipeline.
-    
-    Parameters:
-    - standard_file: path to the standard (ground-truth) file.
-    - test_file: path to the test file.
-    - zip_file: path to the zip archive.
-    - badcase_path: base path and filename prefix for the badcase output (optional).
-    - overall_path: base path and filename prefix for the overall output.
-    - base_data_path: path to the baseline data file.
-    - s3_bucket_name: S3 bucket name (optional).
-    - s3_file_directory: target directory on S3 (optional).
-    - AWS_ACCESS_KEY, AWS_SECRET_KEY, END_POINT_URL: AWS credentials and endpoint URL (optional).
-    """
-    # Check that the required files exist
-    check_json_files_in_zip_exist(zip_file, standard_file, test_file)
-
-    # Read the JSON file contents
-    json_standard_origin, json_test_origin = read_json_files_from_zip(zip_file, standard_file, test_file)
-
-    # Merge the JSON data
-    inner_merge, standard_exist, test_exist = merge_json_data(json_test_origin, json_standard_origin)
-
-    # Compute the overall metrics
-    overall_report_dict = overall_calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], standard_exist, test_exist)
-
-    # Generate timestamped output filenames
-    if badcase_path:
-        badcase_file = generate_filename(badcase_path)
-        result_dict = calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], json_standard_origin)
-        save_results(result_dict, badcase_file)
-
-    overall_file = generate_filename(overall_path)
-    save_results(overall_report_dict, overall_file)
-
-    result = compare_edit_distance(base_data_path, overall_report_dict)
-
-    if all([s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url]):
-        try:
-            if badcase_path:
-                upload_to_s3(badcase_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-            upload_to_s3(overall_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-        except Exception as e:
-            print(f"Error while uploading to S3: {e}")
-
-    print(result)
-    assert result == 1
-
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Main entry point: runs the full evaluation pipeline.")
-    parser.add_argument('standard_file', type=str, help='Path to the standard (ground-truth) file.')
-    parser.add_argument('test_file', type=str, help='Path to the test file.')
-    parser.add_argument('zip_file', type=str, help='Path to the zip archive.')
-    parser.add_argument('overall_path', type=str, help='Base path and filename prefix for the overall output.')
-    parser.add_argument('base_data_path', type=str, help='Path to the baseline data file.')
-    parser.add_argument('--badcase_path', type=str, default=None, help='Base path and filename prefix for the badcase output (optional).')
-    parser.add_argument('--s3_bucket_name', type=str, help='S3 bucket name.', default=None)
-    parser.add_argument('--s3_file_directory', type=str, help='Target directory on S3.', default=None)
-    parser.add_argument('--AWS_ACCESS_KEY', type=str, help='AWS access key.', default=None)
-    parser.add_argument('--AWS_SECRET_KEY', type=str, help='AWS secret key.', default=None)
-    parser.add_argument('--END_POINT_URL', type=str, help='AWS endpoint URL.', default=None)
-
-    args = parser.parse_args()
-
-    main(args.standard_file, args.test_file, args.zip_file, args.overall_path, args.base_data_path,
-         badcase_path=args.badcase_path, s3_bucket_name=args.s3_bucket_name, 
-         s3_file_directory=args.s3_file_directory, aws_access_key=args.AWS_ACCESS_KEY, 
-         aws_secret_key=args.AWS_SECRET_KEY, end_point_url=args.END_POINT_URL)
-

File diff suppressed because it is too large
+ 0 - 0
tools/pdf_json_label_0229.json


File diff suppressed because it is too large
+ 0 - 0
tools/pdf_json_label_0306.json


File diff suppressed because it is too large
+ 0 - 724
tools/result.txt


+ 48 - 0
tools/scoring.py

@@ -0,0 +1,48 @@
+from statistics import mean
+
+from rapidfuzz import fuzz
+
+CHUNK_MIN_CHARS = 25
+
+def chunk_text(text, chunk_len=500):
+    chunks = [text[i:i+chunk_len] for i in range(0, len(text), chunk_len)]
+    chunks = [c for c in chunks if c.strip() and len(c) > CHUNK_MIN_CHARS]
+    return chunks
+
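A quick check of the chunking rule (input invented):

```python
# Fixed 500-character windows; leftover fragments of 25 chars or fewer are dropped.
print([len(c) for c in chunk_text("a" * 1040)])  # [500, 500, 40]
print([len(c) for c in chunk_text("a" * 1020)])  # [500, 500] - the 20-char tail is dropped
```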
+
+def overlap_score(hypothesis_chunks, reference_chunks):
+    # Ratio used to map a hypothesis chunk index onto the reference chunks.
+    if len(reference_chunks) > 0:
+        length_modifier = len(hypothesis_chunks) / len(reference_chunks)
+    else:
+        length_modifier = 0
+    search_distance = max(len(reference_chunks) // 5, 10)
+    chunk_scores = []
+    for i, hyp_chunk in enumerate(hypothesis_chunks):
+        max_score = 0
+        i_offset = int(i * length_modifier)
+        chunk_range = range(max(0, i_offset-search_distance), min(len(reference_chunks), i_offset+search_distance))
+        for j in chunk_range:
+            ref_chunk = reference_chunks[j]
+            # Fuzzy similarity in [0, 1]; score_cutoff=30 short-circuits very poor matches.
+            score = fuzz.ratio(hyp_chunk, ref_chunk, score_cutoff=30) / 100
+            if score > max_score:
+                max_score = score
+        chunk_scores.append(max_score)
+    return chunk_scores
+
+
+def score_text(hypothesis, reference):
+    # Returns a 0-1 alignment score between the hypothesis and reference texts.
+    hypothesis_chunks = chunk_text(hypothesis)
+    reference_chunks = chunk_text(reference)
+    chunk_scores = overlap_score(hypothesis_chunks, reference_chunks)
+    if len(chunk_scores) > 0:
+        return mean(chunk_scores)
+    else:
+        return 0
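A hedged usage sketch of the new scorer (strings invented):

```python
from scoring import score_text

reference = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " * 20
print(score_text(reference, reference))  # 1.0 - identical texts align perfectly
# A lightly corrupted hypothesis lands somewhere between 0 and 1:
print(score_text(reference.replace("dolor", "dollar"), reference))
```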

+ 0 - 901
tools/text_badcase.py

@@ -1,901 +0,0 @@
-import json
-import pandas as pd
-import numpy as np
-from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
-import argparse
-import os
-from sklearn.metrics import classification_report
-from sklearn import metrics
-from datetime import datetime
-import boto3
-from botocore.exceptions import NoCredentialsError, ClientError
-from io import TextIOWrapper
-import zipfile
-
-def Levenshtein_Distance(str1, str2):
-    """
-    Compute and return the Levenshtein edit distance between two strings.
-    
-    Parameters:
-    - str1: the first string to compare.
-    - str2: the second string to compare.
-    
-    Returns:
-    - int: the Levenshtein distance between str1 and str2.
-    
-    Method:
-    - Dynamic programming over a matrix where matrix[i][j] is the Levenshtein
-      distance between the first i characters of str1 and the first j characters of str2.
-    - The matrix is initialized with the boundary cases, i.e. the distance between
-      a string and the empty string.
-    - Each cell is then filled with the minimum cost of an insertion, a deletion,
-      or a substitution, depending on whether the current characters match.
-    """
-    # Initialize the matrix of size (len(str1)+1) x (len(str2)+1); boundary distances are i and j
-    matrix = [[i + j for j in range(len(str2) + 1)] for i in range(len(str1) + 1)]
-
-    # Walk over every character of str1 and str2, updating the matrix
-    for i in range(1, len(str1) + 1):
-        for j in range(1, len(str2) + 1):
-            # Substitution costs 0 if the characters match, 1 otherwise
-            d = 0 if (str1[i - 1] == str2[j - 1]) else 1
-            # Minimum number of operations to turn str1[:i] into str2[:j]
-            matrix[i][j] = min(matrix[i - 1][j] + 1,  # deletion
-                               matrix[i][j - 1] + 1,  # insertion
-                               matrix[i - 1][j - 1] + d)  # substitution
-    # The bottom-right cell is the Levenshtein distance between str1 and str2
-    return matrix[len(str1)][len(str2)]
-
-
-def bbox_offset(b_t, b_s):
-    """
-    Decide whether two bounding boxes overlap closely enough to count as a match.
-    
-    Parameters:
-    - b_t: bounding box from the test document, in the format (x1, y1, x2, y2),
-           where (x1, y1) is the top-left corner and (x2, y2) the bottom-right corner.
-    - b_s: bounding box from the standard document, in the same format.
-    
-    Returns:
-    - True: if the intersection area divided by the union area (IoU) exceeds 0.95,
-            i.e. the two boxes are close enough.
-    - False: otherwise.
-    
-    Notes:
-    - The function first computes the intersection of the two boxes; if that area is
-      very large relative to the union, the boxes are considered close enough.
-    - If the intersection is invalid (negative width or height) or the union is 0
-      (the boxes do not overlap at all), the function returns False.
-    """
-
-    # Unpack the coordinates of the two boxes
-    x1_t, y1_t, x2_t, y2_t = b_t
-    x1_s, y1_s, x2_s, y2_s = b_s
-
-    # Coordinates of the intersection of the two boxes
-    x1 = max(x1_t, x1_s)
-    x2 = min(x2_t, x2_s)
-    y1 = max(y1_t, y1_s)
-    y2 = min(y2_t, y2_s)
-    
-    # If the intersection is valid, compute its area
-    if x2 > x1 and y2 > y1:
-        area_overlap = (x2 - x1) * (y2 - y1)
-    else:
-        # Invalid intersection: treat as no overlap
-        area_overlap = 0
-
-    # Union area of the two boxes (subtract the overlap so it is not counted twice)
-    area_t = (x2_t - x1_t) * (y2_t - y1_t) + (x2_s - x1_s) * (y2_s - y1_s) - area_overlap
-
-    # Check whether the overlap meets the threshold
-    if area_t - area_overlap == 0 or area_overlap / area_t > 0.95:
-        return True
-    else:
-        return False
-
-
-def equations_indicator(test_equations_bboxs, standard_equations_bboxs, test_equations, standard_equations):
-    """
-    Compute the edit distance and BLEU score for equations matched by bounding box.
-    
-    Parameters:
-    - test_equations_bboxs: list of bounding boxes for the test equations.
-    - standard_equations_bboxs: list of bounding boxes for the standard equations.
-    - test_equations: list of test equations.
-    - standard_equations: list of standard equations.
-    
-    Returns:
-    - A tuple with the average Levenshtein edit distance and BLEU score of the matched equations.
-    """
-    
-    # Initialize the lists of matched equations
-    test_match_equations = []
-    standard_match_equations = []
-
-    # Match equations based on bounding-box overlap
-    for index, (test_bbox, standard_bbox) in enumerate(zip(test_equations_bboxs, standard_equations_bboxs)):
-        if not (test_bbox and standard_bbox):  # skip if either list is empty
-            continue
-        for i, sb in enumerate(standard_bbox):
-            for j, tb in enumerate(test_bbox):
-                if bbox_offset(sb, tb):
-                    standard_match_equations.append(standard_equations[index][i])
-                    test_match_equations.append(test_equations[index][j])
-                    break  # stop at the first match
-
-    # Compute the Levenshtein edit distance for each matched pair
-    dis = [Levenshtein_Distance(a, b) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-    # Compute the BLEU score with a smoothing function
-    sm_func = SmoothingFunction().method1
-    bleu = [sentence_bleu([a.split()], b.split(), smoothing_function=sm_func) for a, b in zip(test_match_equations, standard_match_equations) if a and b]
-
-    # Average the edit distances and BLEU scores, handling empty lists
-    equations_edit = np.mean(dis) if dis else 0.0
-    equations_bleu = np.mean(bleu) if bleu else 0.0
-
-    return equations_edit, equations_bleu
-
-def bbox_match_indicator_general(test_bboxs_list, standard_bboxs_list):
-    """
-    Compute bounding-box match metrics for dropped tables, images and text blocks.
-    This version focuses on the bounding-box match itself and does not involve tag matching.
-    
-    Parameters:
-    - test_bboxs_list: list of bounding boxes from the test set, organized per page.
-    - standard_bboxs_list: list of bounding boxes from the standard set, organized per page.
-
-    Returns:
-    - A dictionary with accuracy, precision, recall and F1 score.
-    """
-    # If both lists are completely empty, return all-zero metrics
-    if all(len(page) == 0 for page in test_bboxs_list) and all(len(page) == 0 for page in standard_bboxs_list):
-        return {'accuracy': 0, 'precision': 0, 'recall': 0, 'f1_score': 0}
-    
-
-    matched_bbox = []
-    matched_standard_bbox = []
-
-    for test_page, standard_page in zip(test_bboxs_list, standard_bboxs_list):
-        test_page_bbox, standard_page_bbox = [], []
-        for standard_bbox in standard_page:
-            if len(standard_bbox) != 4:
-                continue
-            matched = False
-            for test_bbox in test_page:
-                if len(test_bbox) == 4 and bbox_offset(standard_bbox, test_bbox):
-                    matched = True
-                    break
-            test_page_bbox.append(int(matched))
-            standard_page_bbox.append(1)
-
-        # Post-process the over-deletion case, keeping the original logic unchanged
-        diff_num = len(test_page) + test_page_bbox.count(0) - len(standard_page)
-        if diff_num > 0:
-            test_page_bbox.extend([1] * diff_num)
-            standard_page_bbox.extend([0] * diff_num)
-
-        matched_bbox.extend(test_page_bbox)
-        matched_standard_bbox.extend(standard_page_bbox)
-
-    block_report = {
-        'accuracy': metrics.accuracy_score(matched_standard_bbox, matched_bbox),
-        'precision': metrics.precision_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'recall': metrics.recall_score(matched_standard_bbox, matched_bbox, zero_division=0),
-        'f1_score': metrics.f1_score(matched_standard_bbox, matched_bbox, zero_division=0)
-    }
-
-    return block_report
-
-
-
-def bbox_match_indicator_dropped_text_block(test_dropped_text_bboxs, standard_dropped_text_bboxs, standard_dropped_text_tag, test_dropped_text_tag):
-    """
-    Compute bounding-box match metrics for dropped text blocks (accuracy, precision,
-    recall and F1 score), together with match metrics for the text-block tags.
-
-    Parameters:
-    - test_dropped_text_bboxs: list of dropped text-block bounding boxes from the test set
-    - standard_dropped_text_bboxs: list of dropped text-block bounding boxes from the standard set
-    - standard_dropped_text_tag: list of dropped text-block tags from the standard set
-    - test_dropped_text_tag: list of dropped text-block tags from the test set
-
-    Returns:
-    - A tuple with the bounding-box match metrics and the text-block tag match metrics
-    """
-    test_text_bbox, standard_text_bbox = [], []
-    test_tag, standard_tag = [], []
-
-    for index, (test_page, standard_page) in enumerate(zip(test_dropped_text_bboxs, standard_dropped_text_bboxs)):
-        # Initialize the per-page result lists
-        test_page_tag, standard_page_tag = [], []
-        test_page_bbox, standard_page_bbox = [], []
-
-        for i, standard_bbox in enumerate(standard_page):
-            matched = False
-            for j, test_bbox in enumerate(test_page):
-                if bbox_offset(standard_bbox, test_bbox):
-                    # Match found: record the tag and the bounding-box match
-                    matched = True
-                    test_page_tag.append(test_dropped_text_tag[index][j])
-                    test_page_bbox.append(1)
-                    break
-
-            if not matched:
-                # No match: record 'None' and a missed bounding box
-                test_page_tag.append('None')
-                test_page_bbox.append(0)
-
-            # Standard bounding boxes and tags always count as matched
-            standard_page_tag.append(standard_dropped_text_tag[index][i])
-            standard_page_bbox.append(1)
-
-        # Handle the possible over-deletion case
-        handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox)
-
-        # Merge the per-page results into the overall results
-        test_tag.extend(test_page_tag)
-        standard_tag.extend(standard_page_tag)
-        test_text_bbox.extend(test_page_bbox)
-        standard_text_bbox.extend(standard_page_bbox)
-
-    # Compute and return the match metrics
-    if not standard_text_bbox or not test_text_bbox:
-        # print("Warning: bounding-box lists are empty; skipping metric computation.")
-        text_block_report = {
-            'accuracy': np.nan,
-            'precision': np.nan,
-            'recall': np.nan,
-            'f1_score': np.nan
-        }
-    else:
-        text_block_report = {
-            'accuracy': metrics.accuracy_score(standard_text_bbox, test_text_bbox),
-            'precision': metrics.precision_score(standard_text_bbox, test_text_bbox, zero_division=0),
-            'recall': metrics.recall_score(standard_text_bbox, test_text_bbox, zero_division=0),
-            'f1_score': metrics.f1_score(standard_text_bbox, test_text_bbox, zero_division=0)
-        }
-
-    # For classification_report, make sure at least one non-'None' label exists
-    labels = list(set(standard_tag) - {'None'})
-    if labels:
-        text_block_tag_report = classification_report(y_true=standard_tag, y_pred=test_tag, labels=labels, output_dict=True, zero_division=0)
-        # Drop the unneeded average reports to simplify the output
-        text_block_tag_report.pop("macro avg", None)
-        text_block_tag_report.pop("weighted avg", None)
-    else:
-        # print("Warning: no valid labels to match; skipping the tag metrics.")
-        text_block_tag_report = {}
-
-    return text_block_report, text_block_tag_report
-
-
-
-def handle_multi_deletion(test_page, test_page_tag, test_page_bbox, standard_page_tag, standard_page_bbox):
-    """
-    Handle the over-deletion case, i.e. the test page has more bounding boxes or tags than the standard page.
-    """
-    excess_count = len(test_page) + test_page_bbox.count(0) - len(standard_page_tag)
-    if excess_count > 0:
-        # Treat the extra items as correctly matched bounding boxes, but with tag 'None'
-        test_page_bbox.extend([1] * excess_count)
-        standard_page_bbox.extend([0] * excess_count)
-        test_page_tag.extend(['None'] * excess_count)
-        standard_page_tag.extend(['None'] * excess_count)
-
-
-def read_json_files(standard_file, test_file):
-    """
-    Read the JSON file contents
-    """
-    with open(standard_file, 'r', encoding='utf-8') as sf:
-        pdf_json_standard = [json.loads(line) for line in sf]
-
-    with open(test_file, 'r', encoding='utf-8') as tf:
-        pdf_json_test = [json.loads(line) for line in tf]
-
-    json_standard_origin = pd.DataFrame(pdf_json_standard)
-    json_test = pd.DataFrame(pdf_json_test)
-
-    return json_standard_origin, json_test
-
-
-def merge_json_data(json_test_df, json_standard_df):
-    """
-    Merge the test and standard datasets on ID, returning the merged data and presence checks.
-
-    Parameters:
-    - json_test_df: DataFrame with the test data.
-    - json_standard_df: DataFrame with the standard data.
-
-    Returns:
-    - inner_merge: inner-merged DataFrame containing the matching rows.
-    - standard_exist: Series flagging presence in the standard data.
-    - test_exist: Series flagging presence in the test data.
-    """
-    test_data = json_test_df[['id', 'mid_json']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-    standard_data = json_standard_df[['id', 'mid_json', 'pass_label']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-
-    outer_merge = pd.merge(test_data, standard_data, on='id', how='outer')
-    outer_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-    
-    standard_exist = outer_merge.standard_mid_json.notnull()
-    test_exist = outer_merge.test_mid_json.notnull()
-
-    inner_merge = pd.merge(test_data, standard_data, on='id', how='inner')
-    inner_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    return inner_merge, standard_exist, test_exist
-
-
-def process_equations_and_blocks(json_data):
-    """
-    Process the JSON data and extract bounding boxes and text for equations,
-    text blocks, image blocks and table blocks.
-    
-    Parameters:
-    - json_data: list containing the JSON data of the standard or test documents.
-    
-    Returns:
-    - A dictionary with the processed data.
-    """
-    equations_bboxs = {"inline": [], "interline": []}
-    equations_texts = {"inline": [], "interline": []}
-    dropped_bboxs = {"text": [], "image": [], "table": []}
-    dropped_tags = {"text": []}
-    para_texts = []
-    para_nums = []
-    preproc_nums = []
-
-
-    for i in json_data:
-        mid_json = pd.DataFrame(i).iloc[:,:-1] 
-        page_data = {
-            "equations_bboxs_list": {"inline": [], "interline": []},
-            "equations_texts_list": {"inline": [], "interline": []},
-            "dropped_bboxs_list": {"text": [], "image": [], "table": []},
-            "dropped_tags_list": {"text": []},
-            "para_texts_list": [],
-            "para_nums_list": [],
-            "preproc_nums_list":[]
-        }
-
-        for eq_type in ["inline", "interline"]:
-            for equations in mid_json.loc[f"{eq_type}_equations", :]:
-                bboxs = [eq['bbox'] for eq in equations]
-                texts = [eq['latex_text'] for eq in equations]
-                page_data["equations_bboxs_list"][eq_type].append(bboxs)
-                page_data["equations_texts_list"][eq_type].append(texts)
-        
-        equations_bboxs["inline"].append(page_data["equations_bboxs_list"]["inline"])
-        equations_bboxs["interline"].append(page_data["equations_bboxs_list"]["interline"])
-        equations_texts["inline"].append(page_data["equations_texts_list"]["inline"])
-        equations_texts["interline"].append(page_data["equations_texts_list"]["interline"])
-
-
-        # Extract information about dropped text blocks
-        for dropped_text_blocks in mid_json.loc['droped_text_block',:]:
-            bboxs, tags = [], []
-            for block in dropped_text_blocks:
-                bboxs.append(block['bbox'])
-                tags.append(block.get('tag', 'None'))
-            
-            page_data["dropped_bboxs_list"]["text"].append(bboxs)
-            page_data["dropped_tags_list"]["text"].append(tags)
-        
-        dropped_bboxs["text"].append(page_data["dropped_bboxs_list"]["text"])
-        dropped_tags["text"].append(page_data["dropped_tags_list"]["text"])
-
-
-      
-        # Process the dropped image blocks and table blocks together
-        for block_type in ['image', 'table']:
-            for blocks in mid_json.loc[f'droped_{block_type}_block', :]:
-                # Append the page's whole list of blocks directly
-                page_data["dropped_bboxs_list"][block_type].append(blocks)
-
-        # Append the per-page bounding-box lists to the result dictionary
-        dropped_bboxs['image'].append(page_data["dropped_bboxs_list"]['image'])
-        dropped_bboxs['table'].append(page_data["dropped_bboxs_list"]['table'])
-        
-        
-        # Process paragraphs
-        for para_blocks in mid_json.loc['para_blocks', :]:
-            page_data["para_nums_list"].append(len(para_blocks))  # count the paragraphs
-            for para_block in para_blocks:
-                page_data["para_texts_list"].append(para_block['text'])
-        
-        for preproc_blocks in mid_json.loc['preproc_blocks', :]:
-            numbers=[]
-            for preproc_block in preproc_blocks:
-                numbers.append(preproc_block['number'])
-            
-            page_data["preproc_nums_list"].append(numbers)
-            
-            
-        
-        para_texts.append(page_data["para_texts_list"])
-        para_nums.append(page_data["para_nums_list"])
-        preproc_nums.append(page_data["preproc_nums_list"])
-
-        
-    return {
-        "equations_bboxs": equations_bboxs,
-        "equations_texts": equations_texts,
-        "dropped_bboxs": dropped_bboxs,
-        "dropped_tags": dropped_tags,
-        "para_texts": para_texts,
-        "para_nums": para_nums,
-        "preproc_nums": preproc_nums
-    }
-
-
-def consolidate_data(test_data, standard_data, key_path):
-    """
-    Consolidates data from test and standard datasets based on the provided key path.
-    
-    :param test_data: Dictionary containing the test dataset.
-    :param standard_data: Dictionary containing the standard dataset.
-    :param key_path: List of keys leading to the desired data within the dictionaries.
-    :return: Two lists (standard first, then test) with all items found at the specified key path.
-    """
-    # Initialize an empty list to hold the consolidated data
-    overall_data_standard = []
-    overall_data_test = []
-    
-    # Helper function to recursively navigate through the dictionaries based on the key path
-    def extract_data(source_data, keys):
-        for key in keys[:-1]:
-            source_data = source_data.get(key, {})
-        return source_data.get(keys[-1], [])
-    
-    for data in extract_data(standard_data, key_path):
-        # Each per-document entry is already a list, so extend the overall list with its items
-        overall_data_standard.extend(data)
-    
-    for data in extract_data(test_data, key_path):
-        overall_data_test.extend(data)
-    
-    return overall_data_standard, overall_data_test
-
-
-def calculate_metrics(inner_merge, json_test, json_standard, json_standard_origin):
-    """
-    Compute the per-document metrics.
-    """
-    # Build a mapping from ID to file_id
-    id_to_file_id_map = pd.Series(json_standard_origin.file_id.values, index=json_standard_origin.id).to_dict()
-
-    # Process the standard and test data
-    process_data_standard = process_equations_and_blocks(json_standard)
-    process_data_test = process_equations_and_blocks(json_test)
-
-    # Select the rows of inner_merge whose pass_label is 'yes'
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    # Compute edit distance and BLEU score for the pass_label == 'yes' data
-    for idx, (a, b, id) in enumerate(zip(test_para_text, standard_para_text, ids_yes)):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[id] = Levenshtein_Distance(a, b)
-        pdf_bleu[id] = sentence_bleu([a1], b1)
-
-        
-    result_dict = {}
-    acc_para = []
-
-    # Compute the remaining metrics for every document
-    for index, id_value in enumerate(inner_merge['id'].tolist()):
-        result = {}
-        
-        # Add the file_id to the result
-        file_id = id_to_file_id_map.get(id_value, "Unknown")
-        result['file_id'] = file_id
-        
-
-        
-        # Decide from the id whether pdf_dis and pdf_bleu should be reported
-        if id_value in ids_yes:
-            result['pdf_dis'] = pdf_dis[id_value]
-            result['pdf_bleu'] = pdf_bleu[id_value]
-        
-        # Mean edit distance of the reading order
-        preproc_num_dis = []
-        for a, b in zip(process_data_test['preproc_nums'][index], process_data_standard['preproc_nums'][index]):
-            preproc_num_dis.append(Levenshtein_Distance(a, b))
-        result['阅读顺序编辑距离'] = np.mean(preproc_num_dis)
-
-
-        # Compute the paragraph-split accuracy
-        single_test_para_num = np.array(process_data_test['para_nums'][index])
-        single_standard_para_num = np.array(process_data_standard['para_nums'][index])
-        acc_para.append(np.mean(single_test_para_num == single_standard_para_num))
-        
-        result['分段准确率'] = acc_para[index]
-    
-        # Inline equation accuracy, edit distance and BLEU
-        result['行内公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index])
-        
-        result['行内公式编辑距离'], result['行内公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["inline"][index],
-            process_data_standard["equations_bboxs"]["inline"][index],
-            process_data_test["equations_texts"]["inline"][index],
-            process_data_standard["equations_texts"]["inline"][index])
-
-        # Interline equation accuracy, edit distance and BLEU
-        result['行间公式准确率'] = bbox_match_indicator_general(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index])
-        
-        result['行间公式编辑距离'], result['行间公式bleu'] = equations_indicator(
-            process_data_test["equations_bboxs"]["interline"][index],
-            process_data_standard["equations_bboxs"]["interline"][index],
-            process_data_test["equations_texts"]["interline"][index],
-            process_data_standard["equations_texts"]["interline"][index])
-
-        # Dropped-text accuracy and dropped-text tag accuracy
-        result['丢弃文本准确率'], result['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-            process_data_test["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_bboxs"]["text"][index],
-            process_data_standard["dropped_tags"]["text"][index],
-            process_data_test["dropped_tags"]["text"][index])
-
-        # Dropped-image accuracy
-        result['丢弃图片准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["image"][index],
-            process_data_standard["dropped_bboxs"]["image"][index])
-
-        # Dropped-table accuracy
-        result['丢弃表格准确率'] = bbox_match_indicator_general(
-            process_data_test["dropped_bboxs"]["table"][index],
-            process_data_standard["dropped_bboxs"]["table"][index])
-
-
-        # Store this document's results in result_dict
-        result_dict[id_value] = result
-
-    return result_dict
-
-
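-# Illustrative helper, not part of the original pipeline: a minimal sketch of
-# the scoring call used in calculate_metrics above, assuming sentence_bleu is
-# nltk.translate.bleu_score.sentence_bleu. Handed plain strings, NLTK treats
-# every character as a token, i.e. character-level BLEU; the strings are made up.
-def _demo_char_bleu():
-    from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
-    ref = "the quick brown fox"
-    hyp = "the quick brown fax"
-    # Smoothing avoids a zero score when a higher-order n-gram has no overlap
-    return sentence_bleu([ref], hyp, smoothing_function=SmoothingFunction().method1)
-
-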
-def overall_calculate_metrics(inner_merge, json_test, json_standard, standard_exist, test_exist):
-    """
-    Compute the overall metrics: accuracy, precision, recall, F1 score, and
-    detailed metrics for the individual aspects.
-
-    Args:
-    - inner_merge: merged data containing the rows matched between test and standard.
-    - json_test: test data in JSON form.
-    - json_standard: standard data in JSON form.
-    - standard_exist: existence flags for the standard data (per outer-merge row).
-    - test_exist: existence flags for the test data (per outer-merge row).
-
-    Returns:
-    - overall_report: dict holding the various metrics.
-    """
-
-    # Process the standard and test data, extracting equations and blocks
-    process_data_standard = process_equations_and_blocks(json_standard)
-    process_data_test = process_equations_and_blocks(json_test)
-
-    # Initialize the overall report and compute the base classification metrics
-    overall_report = {}
-    overall_report['accuracy'] = metrics.accuracy_score(standard_exist, test_exist)
-    overall_report['precision'] = metrics.precision_score(standard_exist, test_exist)
-    overall_report['recall'] = metrics.recall_score(standard_exist, test_exist)
-    overall_report['f1_score'] = metrics.f1_score(standard_exist, test_exist)
-
-    # Select the rows with pass_label == 'yes' and compute edit distance and BLEU
-    test_para_text = np.asarray(process_data_test['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    standard_para_text = np.asarray(process_data_standard['para_texts'], dtype=object)[inner_merge['pass_label'] == 'yes']
-    ids_yes = inner_merge['id'][inner_merge['pass_label'] == 'yes'].tolist()
-
-    pdf_dis = {}
-    pdf_bleu = {}
-
-    for a, b, doc_id in zip(test_para_text, standard_para_text, ids_yes):
-        a1 = ''.join(a)
-        b1 = ''.join(b)
-        pdf_dis[doc_id] = Levenshtein_Distance(a, b)
-        pdf_bleu[doc_id] = sentence_bleu([a1], b1)
-
-    overall_report['pdf间的平均编辑距离'] = np.mean(list(pdf_dis.values()))
-    overall_report['pdf间的平均bleu'] = np.mean(list(pdf_bleu.values()))
-
-    # Consolidate the inline equation bboxes across documents (a hypothetical
-    # sketch of consolidate_data follows this function)
-    overall_equations_bboxs_inline_standard, overall_equations_bboxs_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "inline"])
-
-    # Consolidate the inline equation texts across documents
-    overall_equations_texts_inline_standard, overall_equations_texts_inline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "inline"])
-
-    # Consolidate the interline (display) equation bboxes across documents
-    overall_equations_bboxs_interline_standard, overall_equations_bboxs_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_bboxs", "interline"])
-
-    # Consolidate the interline (display) equation texts across documents
-    overall_equations_texts_interline_standard, overall_equations_texts_interline_test = consolidate_data(process_data_test, process_data_standard, ["equations_texts", "interline"])
-
-    # Consolidate the dropped text bboxes across documents
-    overall_dropped_bboxs_text_standard, overall_dropped_bboxs_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs", "text"])
-
-    # Consolidate the dropped text tags across documents
-    overall_dropped_tags_text_standard, overall_dropped_tags_text_test = consolidate_data(process_data_test, process_data_standard, ["dropped_tags", "text"])
-
-    # Consolidate the dropped image bboxes across documents
-    overall_dropped_bboxs_image_standard, overall_dropped_bboxs_image_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs", "image"])
-
-    # Consolidate the dropped table bboxes across documents
-    overall_dropped_bboxs_table_standard, overall_dropped_bboxs_table_test = consolidate_data(process_data_test, process_data_standard, ["dropped_bboxs", "table"])
-
-    # Consolidate the reading-order sequences across documents
-    overall_preproc_standard, overall_preproc_test = consolidate_data(process_data_test, process_data_standard, ["preproc_nums"])
-
-    # Flatten the per-document paragraph counts of the test and standard data
-    para_nums_test = process_data_test['para_nums']
-    para_nums_standard = process_data_standard['para_nums']
-    overall_para_nums_standard = [item for sublist in para_nums_standard for item in (sublist if isinstance(sublist, list) else [sublist])]
-    overall_para_nums_test = [item for sublist in para_nums_test for item in (sublist if isinstance(sublist, list) else [sublist])]
-
-    # Mean edit distance of the reading-order sequences
-    preproc_num_dis = []
-    for a, b in zip(overall_preproc_standard, overall_preproc_test):
-        preproc_num_dis.append(Levenshtein_Distance(a, b))
-    overall_report['阅读顺序编辑距离'] = np.mean(preproc_num_dis)
-
-    # Paragraph-segmentation accuracy: elementwise match of the paragraph counts
-    test_para_num = np.array(overall_para_nums_test)
-    standard_para_num = np.array(overall_para_nums_standard)
-    acc_para = np.mean(test_para_num == standard_para_num)
-
-    overall_report['分段准确率'] = acc_para
-
-    # Compute and record the remaining metrics in the report
-    overall_report['行内公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard)
-
-    overall_report['行内公式编辑距离'], overall_report['行内公式bleu'] = equations_indicator(
-        overall_equations_bboxs_inline_test,
-        overall_equations_bboxs_inline_standard,
-        overall_equations_texts_inline_test,
-        overall_equations_texts_inline_standard)
-
-    overall_report['行间公式准确率'] = bbox_match_indicator_general(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard)
-
-    overall_report['行间公式编辑距离'], overall_report['行间公式bleu'] = equations_indicator(
-        overall_equations_bboxs_interline_test,
-        overall_equations_bboxs_interline_standard,
-        overall_equations_texts_interline_test,
-        overall_equations_texts_interline_standard)
-
-    overall_report['丢弃文本准确率'], overall_report['丢弃文本标签准确率'] = bbox_match_indicator_dropped_text_block(
-        overall_dropped_bboxs_text_test,
-        overall_dropped_bboxs_text_standard,
-        overall_dropped_tags_text_standard,
-        overall_dropped_tags_text_test)
-
-    overall_report['丢弃图片准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_image_test,
-        overall_dropped_bboxs_image_standard)
-
-    overall_report['丢弃表格准确率'] = bbox_match_indicator_general(
-        overall_dropped_bboxs_table_test,
-        overall_dropped_bboxs_table_standard)
-
-    return overall_report
-
-
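-# Hypothetical sketch, for illustration only, of the flattening that
-# consolidate_data performs in overall_calculate_metrics above; the real
-# definition lives earlier in this file and is not shown in this diff hunk.
-# It walks the given key path in the two result dicts and concatenates the
-# per-document lists into one overall list each, returning the standard side
-# first to match the call sites above.
-def _consolidate_data_sketch(process_data_test, process_data_standard, key_path):
-    def collect(data):
-        node = data
-        for key in key_path:
-            node = node[key]
-        # node is a list of per-document lists; flatten one level
-        return [item for per_doc in node for item in per_doc]
-    return collect(process_data_standard), collect(process_data_test)
-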
-
-def check_json_files_in_zip_exist(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    Check that the given JSON files exist inside the ZIP archive.
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        # List every file in the ZIP archive
-        all_files_in_zip = z.namelist()
-        # Make sure both the standard file and the test file are present
-        if standard_json_path_in_zip not in all_files_in_zip or test_json_path_in_zip not in all_files_in_zip:
-            raise FileNotFoundError("One or both of the required JSON files are missing from the ZIP archive.")
-
-def read_json_files_from_streams(standard_file_stream, test_file_stream):
-    """
-    Read JSON Lines content (one JSON object per line) from the file streams.
-    """
-    pdf_json_standard = [json.loads(line) for line in standard_file_stream]
-    pdf_json_test = [json.loads(line) for line in test_file_stream]
-
-    json_standard_origin = pd.DataFrame(pdf_json_standard)
-    json_test_origin = pd.DataFrame(pdf_json_test)
-
-    return json_standard_origin, json_test_origin
-
-
-def read_json_files_from_zip(zip_file_path, standard_json_path_in_zip, test_json_path_in_zip):
-    """
-    Read the two JSON files from the ZIP archive and return them as DataFrames.
-    """
-    with zipfile.ZipFile(zip_file_path, 'r') as z:
-        with z.open(standard_json_path_in_zip) as standard_file_stream, \
-             z.open(test_json_path_in_zip) as test_file_stream:
-
-            standard_file_text_stream = TextIOWrapper(standard_file_stream, encoding='utf-8')
-            test_file_text_stream = TextIOWrapper(test_file_stream, encoding='utf-8')
-
-            json_standard_origin, json_test_origin = read_json_files_from_streams(
-                standard_file_text_stream, test_file_text_stream
-            )
-    
-    return json_standard_origin, json_test_origin
-
-
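-# Runnable illustration, not part of the original pipeline, of the reading
-# path above: build a made-up in-memory ZIP whose members are JSON Lines
-# files (one JSON object per line, as the stream reader expects) and load it.
-def _demo_read_json_files_from_zip():
-    import io
-    buf = io.BytesIO()
-    with zipfile.ZipFile(buf, 'w') as z:
-        z.writestr('standard.json', '\n'.join(
-            json.dumps({'id': i, 'mid_json': [], 'pass_label': 'yes'}) for i in (1, 2)))
-        z.writestr('test.json', '\n'.join(
-            json.dumps({'id': i, 'mid_json': []}) for i in (1, 2)))
-    buf.seek(0)
-    # zipfile also accepts a file object, so the demo never touches disk
-    std_df, test_df = read_json_files_from_zip(buf, 'standard.json', 'test.json')
-    return std_df.columns.tolist()  # ['id', 'mid_json', 'pass_label']
-
-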
-def merge_json_data(json_test_df, json_standard_df):
-    """
-    Merge the test and standard datasets on their ID and return the merged
-    data together with existence flags.
-
-    Args:
-    - json_test_df: DataFrame of the test data.
-    - json_standard_df: DataFrame of the standard data.
-
-    Returns:
-    - inner_merge: inner-merged DataFrame containing the matched rows.
-    - standard_exist: Series flagging, per outer-merge row, whether the standard data exists.
-    - test_exist: Series flagging, per outer-merge row, whether the test data exists.
-    """
-    test_data = json_test_df[['id', 'mid_json']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-    standard_data = json_standard_df[['id', 'mid_json', 'pass_label']].drop_duplicates(subset='id', keep='first').reset_index(drop=True)
-
-    outer_merge = pd.merge(test_data, standard_data, on='id', how='outer')
-    outer_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    standard_exist = outer_merge.standard_mid_json.notnull()
-    test_exist = outer_merge.test_mid_json.notnull()
-
-    inner_merge = pd.merge(test_data, standard_data, on='id', how='inner')
-    inner_merge.columns = ['id', 'test_mid_json', 'standard_mid_json', 'pass_label']
-
-    return inner_merge, standard_exist, test_exist
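-
-# Tiny illustration, with made-up rows, of the merge semantics above: the
-# inner merge keeps only the ids present on both sides, while the existence
-# flags are aligned with the rows of the outer merge.
-def _demo_merge_json_data():
-    test_df = pd.DataFrame({'id': [1, 2], 'mid_json': ['t1', 't2']})
-    standard_df = pd.DataFrame({'id': [2, 3], 'mid_json': ['s2', 's3'],
-                                'pass_label': ['yes', 'no']})
-    inner, standard_exist, test_exist = merge_json_data(test_df, standard_df)
-    assert inner['id'].tolist() == [2]
-    assert standard_exist.tolist() == [False, True, True]
-    assert test_exist.tolist() == [True, True, False]
-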
-
-def generate_filename(base_path):
-    """
-    Generate an output filename carrying the current timestamp.
-    Args:
-    - base_path: base path and filename prefix.
-    Returns:
-    - the full output filename with the current timestamp appended.
-    """
-    current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
-    return f"{base_path}_{current_time}.json"
-
-def save_results(data_dict, file_path):
-    """
-    Save the data dict as a JSON file at the given path.
-    Args:
-    - data_dict: dict holding the data.
-    - file_path: destination path of the result file, including the filename.
-    """
-    with open(file_path, 'w', encoding='utf-8') as f:
-        json.dump(data_dict, f, ensure_ascii=False, indent=4)
-    print(f"Results saved to: {file_path}")
-
-
-def upload_to_s3(file_path, bucket_name, s3_directory, AWS_ACCESS_KEY, AWS_SECRET_KEY, END_POINT_URL):
-    """
-    Upload a file to Amazon S3 (or any S3-compatible endpoint).
-    """
-    # Create the S3 client
-    s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY, aws_secret_access_key=AWS_SECRET_KEY, endpoint_url=END_POINT_URL)
-    try:
-        # Extract the filename from the path
-        file_name = os.path.basename(file_path)
-
-        # Build the S3 object key by joining s3_directory and file_name
-        s3_object_key = f"{s3_directory}/{file_name}"
-
-        # Upload the file to S3
-        s3.upload_file(file_path, bucket_name, s3_object_key)
-
-        print(f"File {file_path} uploaded to S3 bucket {bucket_name}, directory {s3_directory}, as {file_name}")
-    except FileNotFoundError:
-        print(f"File {file_path} not found; please check the path.")
-    except NoCredentialsError:
-        print("AWS credentials not found; please check your access key ID and secret key.")
-    except ClientError as e:
-        print(f"Error while uploading the file: {e}")
-
-
-def compare_edit_distance(json_file, overall_report):
-    """
-    Compare the current average edit distance against the baseline stored in
-    json_file; return 1 if it is no worse than the baseline, otherwise 0.
-    """
-    with open(json_file, 'r', encoding='utf-8') as f:
-        json_data = json.load(f)
-
-    json_edit_distance = json_data['pdf间的平均编辑距离']
-
-    return 0 if overall_report['pdf间的平均编辑距离'] > json_edit_distance else 1
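-
-# Self-contained illustration (not part of the original pipeline) of the
-# regression gate above: write a baseline file holding a made-up average edit
-# distance, then compare two made-up reports against it.
-def _demo_edit_distance_gate():
-    import os
-    import tempfile
-    baseline = {'pdf间的平均编辑距离': 130.0}
-    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False, encoding='utf-8') as tmp:
-        json.dump(baseline, tmp, ensure_ascii=False)
-        path = tmp.name
-    passed = compare_edit_distance(path, {'pdf间的平均编辑距离': 120.0})     # 1: no regression
-    regressed = compare_edit_distance(path, {'pdf间的平均编辑距离': 140.0})  # 0: regression
-    os.remove(path)
-    return passed, regressed
-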
-    
-def main(standard_file, test_file, zip_file, overall_path, base_data_path, badcase_path=None, s3_bucket_name=None, s3_file_directory=None,
-         aws_access_key=None, aws_secret_key=None, end_point_url=None):
-    """
-    Main entry point; runs the whole evaluation flow.
-
-    Args:
-    - standard_file: path of the standard file.
-    - test_file: path of the test file.
-    - zip_file: path of the ZIP archive.
-    - overall_path: base path and filename prefix of the overall report.
-    - base_data_path: path of the baseline data file.
-    - badcase_path: base path and filename prefix of the badcase file (optional).
-    - s3_bucket_name: S3 bucket name (optional).
-    - s3_file_directory: destination directory on S3 (optional).
-    - aws_access_key, aws_secret_key, end_point_url: AWS credentials and endpoint URL (optional).
-    """
-
-    # Make sure both JSON files exist inside the archive
-    check_json_files_in_zip_exist(zip_file, standard_file, test_file)
-
-    # Read the JSON file contents
-    json_standard_origin, json_test_origin = read_json_files_from_zip(zip_file, standard_file, test_file)
-
-    # Merge the JSON data
-    inner_merge, standard_exist, test_exist = merge_json_data(json_test_origin, json_standard_origin)
-
-    # Compute the overall metrics
-    overall_report_dict = overall_calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], standard_exist, test_exist)
-
-    # Generate timestamped output filenames and, if requested, the badcase report
-    if badcase_path:
-        badcase_file = generate_filename(badcase_path)
-        result_dict = calculate_metrics(inner_merge, inner_merge['test_mid_json'], inner_merge['standard_mid_json'], json_standard_origin)
-        save_results(result_dict, badcase_file)
-
-    overall_file = generate_filename(overall_path)
-    save_results(overall_report_dict, overall_file)
-
-    result = compare_edit_distance(base_data_path, overall_report_dict)
-
-    if all([s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url]):
-        try:
-            if badcase_path:
-                upload_to_s3(badcase_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-            upload_to_s3(overall_file, s3_bucket_name, s3_file_directory, aws_access_key, aws_secret_key, end_point_url)
-        except Exception as e:
-            print(f"上传到S3时发生错误: {e}")
-
-    print(result)
-    assert result == 1
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the whole evaluation flow.")
-    parser.add_argument('standard_file', type=str, help='Path of the standard file.')
-    parser.add_argument('test_file', type=str, help='Path of the test file.')
-    parser.add_argument('zip_file', type=str, help='Path of the ZIP archive.')
-    parser.add_argument('overall_path', type=str, help='Base path and filename prefix of the overall report.')
-    parser.add_argument('base_data_path', type=str, help='Base path and filename prefix of the baseline file.')
-    parser.add_argument('--badcase_path', type=str, default=None, help='Base path and filename prefix of the badcase file (optional).')
-    parser.add_argument('--s3_bucket_name', type=str, help='S3 bucket name.', default=None)
-    parser.add_argument('--s3_file_directory', type=str, help='Destination directory on S3.', default=None)
-    parser.add_argument('--AWS_ACCESS_KEY', type=str, help='AWS access key.', default=None)
-    parser.add_argument('--AWS_SECRET_KEY', type=str, help='AWS secret key.', default=None)
-    parser.add_argument('--END_POINT_URL', type=str, help='AWS endpoint URL.', default=None)
-
-    args = parser.parse_args()
-
-    main(args.standard_file, args.test_file, args.zip_file, args.overall_path, args.base_data_path,
-         badcase_path=args.badcase_path, s3_bucket_name=args.s3_bucket_name, 
-         s3_file_directory=args.s3_file_directory, aws_access_key=args.AWS_ACCESS_KEY, 
-         aws_secret_key=args.AWS_SECRET_KEY, end_point_url=args.END_POINT_URL)
