
Merge pull request #19 from myhloli/master

Add a leading underscore to the trace keys; implement UNIPipe
drunkpig 1 year ago
commit bb67636205

+ 1 - 3
demo/text_demo.py

@@ -67,9 +67,7 @@ def demo_classify_by_type(book_name=None, debug_mode=True):
     img_num_list = pdf_meta["imgs_per_page"]
     text_len_list = pdf_meta["text_len_per_page"]
     text_layout_list = pdf_meta["text_layout_per_page"]
-    pdf_path = json_object.get("file_location")
     is_text_pdf, results = classify(
-        pdf_path,
         total_page,
         page_width,
         page_height,
@@ -89,7 +87,7 @@ def demo_meta_scan(book_name=None, debug_mode=True):
     s3_pdf_path = json_object.get("file_location")
     s3_config = get_s3_config_dict(s3_pdf_path)
     pdf_bytes = read_file(s3_pdf_path, s3_config)
-    res = pdf_meta_scan(s3_pdf_path, pdf_bytes)
+    res = pdf_meta_scan(pdf_bytes)
 
     logger.info(json.dumps(res, ensure_ascii=False))
     write_json_to_local(res, book_name)

+ 4 - 3
magic_pdf/dict2md/mkcontent.py

@@ -2,6 +2,7 @@ import math
 from loguru import logger
 
 from magic_pdf.libs.boxbase import find_bottom_nearest_text_bbox, find_top_nearest_text_bbox
+from magic_pdf.libs.commons import join_path
 from magic_pdf.libs.ocr_content_type import ContentType
 
 TYPE_INLINE_EQUATION = ContentType.InlineEquation
@@ -227,7 +228,7 @@ def __insert_before_para(text, type, element, content_list):
         logger.error(f"Can't find the location of image {element.get('image_path')} in the markdown file, search target is {text}")
          
 
-def mk_universal_format(para_dict: dict):
+def mk_universal_format(para_dict: dict, img_buket_path):
     """
     Build the universal content format: https://aicarrier.feishu.cn/wiki/FqmMwcH69iIdCWkkyjvcDwNUnTY
     """
@@ -249,7 +250,7 @@ def mk_universal_format(para_dict: dict):
             for img in all_page_images:
                 content_node = {
                     "type": "image",
-                    "img_path": img['image_path'],
+                    "img_path": join_path(img_buket_path, img['image_path']),
                     "img_alt":"",
                     "img_title":"",
                     "img_caption":""
@@ -258,7 +259,7 @@ def mk_universal_format(para_dict: dict):
             for table in all_page_tables:
                 content_node = {
                     "type": "table",
-                    "img_path": table['image_path'],
+                    "img_path": join_path(img_buket_path, table['image_path']),
                     "table_latex": table.get("text"),
                     "table_title": "",
                     "table_caption": "",

+ 8 - 7
magic_pdf/dict2md/ocr_mkcontent.py

@@ -1,3 +1,4 @@
+from magic_pdf.libs.commons import join_path
 from magic_pdf.libs.language import detect_lang
 from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
 from magic_pdf.libs.ocr_content_type import ContentType
@@ -137,10 +138,10 @@ def ocr_mk_markdown_with_para_core(paras_of_layout, mode):
     return page_markdown
 
 
-def para_to_standard_format(para):
+def para_to_standard_format(para, img_buket_path):
     para_content = {}
     if len(para) == 1:
-        para_content = line_to_standard_format(para[0])
+        para_content = line_to_standard_format(para[0], img_buket_path)
     elif len(para) > 1:
         para_text = ''
         inline_equation_num = 0
@@ -170,7 +171,7 @@ def para_to_standard_format(para):
         }
     return para_content
 
-def make_standard_format_with_para(pdf_info_dict: dict):
+def make_standard_format_with_para(pdf_info_dict: dict, img_buket_path: str):
     content_list = []
     for _, page_info in pdf_info_dict.items():
         paras_of_layout = page_info.get("para_blocks")
@@ -178,12 +179,12 @@ def make_standard_format_with_para(pdf_info_dict: dict):
             continue
         for paras in paras_of_layout:
             for para in paras:
-                para_content = para_to_standard_format(para)
+                para_content = para_to_standard_format(para, img_buket_path)
                 content_list.append(para_content)
     return content_list
 
 
-def line_to_standard_format(line):
+def line_to_standard_format(line, img_buket_path):
     line_text = ""
     inline_equation_num = 0
     for span in line['spans']:
@@ -194,13 +195,13 @@ def line_to_standard_format(line):
                 if span['type'] == ContentType.Image:
                     content = {
                         'type': 'image',
-                        'img_path': span['image_path']
+                        'img_path': join_path(img_buket_path, span['image_path'])
                     }
                     return content
                 elif span['type'] == ContentType.Table:
                     content = {
                         'type': 'table',
-                        'img_path': span['image_path']
+                        'img_path': join_path(img_buket_path, span['image_path'])
                     }
                     return content
         else:

+ 4 - 3
magic_pdf/filter/pdf_classify_by_type.py

@@ -15,6 +15,7 @@ from collections import Counter
 
 import click
 import numpy as np
+from loguru import logger
 
 from magic_pdf.libs.commons import mymax, get_top_percent_list
 from magic_pdf.filter.pdf_meta_scan import scan_max_page, junk_limit_min
@@ -298,7 +299,7 @@ def classify_by_img_narrow_strips(page_width, page_height, img_sz_list):
     return narrow_strip_pages_ratio < 0.5
 
 
-def classify(pdf_path, total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
+def classify(total_page: int, page_width, page_height, img_sz_list: list, text_len_list: list, img_num_list: list, text_layout_list: list):
     """
     Image and page dimensions here are in pts.
     :param total_page:
@@ -323,7 +324,7 @@ def classify(pdf_path, total_page: int, page_width, page_height, img_sz_list: li
     elif not any(results.values()):
         return False, results
     else:
-        print(f"WARNING: {pdf_path} is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}", file=sys.stderr)  # this case helps quickly identify unusual pdfs and tune the classification algorithm for them
+        logger.warning(f"pdf is not classified by area and text_len, by_image_area: {results['by_image_area']}, by_text: {results['by_text_len']}, by_avg_words: {results['by_avg_words']}, by_img_num: {results['by_img_num']}, by_text_layout: {results['by_text_layout']}, by_img_narrow_strips: {results['by_img_narrow_strips']}")  # this case helps quickly identify unusual pdfs and tune the classification algorithm for them
         return False, results
 
 
@@ -350,7 +351,7 @@ def main(json_file):
                 is_needs_password = o['is_needs_password']
                 if is_encrypted or total_page == 0 or is_needs_password:  # encrypted, password-protected, and zero-page pdfs are all skipped
                     continue
-                tag = classify(pdf_path, total_page, page_width, page_height, img_sz_list, text_len_list, text_layout_list)
+                tag = classify(total_page, page_width, page_height, img_sz_list, text_len_list, img_num_list, text_layout_list)
                 o['is_text_pdf'] = tag
                 print(json.dumps(o, ensure_ascii=False))
     except Exception as e:

+ 4 - 5
magic_pdf/filter/pdf_meta_scan.py

@@ -287,7 +287,7 @@ def get_language(doc: fitz.Document):
     return language
 
 
-def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
+def pdf_meta_scan(pdf_bytes: bytes):
     """
     :param pdf_bytes: binary content of the pdf file
@@ -298,8 +298,8 @@ def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
     is_encrypted = doc.is_encrypted
     total_page = len(doc)
     if total_page == 0:
-        logger.warning(f"drop this pdf: {s3_pdf_path}, drop_reason: {DropReason.EMPTY_PDF}")
-        result = {"need_drop": True, "drop_reason": DropReason.EMPTY_PDF}
+        logger.warning(f"drop this pdf, drop_reason: {DropReason.EMPTY_PDF}")
+        result = {"_need_drop": True, "_drop_reason": DropReason.EMPTY_PDF}
         return result
     else:
         page_width_pts, page_height_pts = get_pdf_page_size_pts(doc)
@@ -322,7 +322,6 @@ def pdf_meta_scan(s3_pdf_path: str, pdf_bytes: bytes):
 
         # finally, emit one json record
         res = {
-            "pdf_path": s3_pdf_path,
             "is_needs_password": is_needs_password,
             "is_encrypted": is_encrypted,
             "total_page": total_page,
@@ -350,7 +349,7 @@ def main(s3_pdf_path: str, s3_profile: str):
     """
     try:
         file_content = read_file(s3_pdf_path, s3_profile)
-        pdf_meta_scan(s3_pdf_path, file_content)
+        pdf_meta_scan(file_content)
     except Exception as e:
         print(f"ERROR: {s3_pdf_path}, {e}", file=sys.stderr)
         logger.exception(e)
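
With the s3 path parameter removed, pdf_meta_scan is driven purely by the pdf bytes, and callers branch on the underscored drop keys. A minimal caller sketch (the local file path is hypothetical):

    from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan

    with open("sample.pdf", "rb") as f:   # hypothetical local pdf
        pdf_bytes = f.read()

    meta = pdf_meta_scan(pdf_bytes)
    if meta.get("_need_drop", False):
        print(f"drop, reason: {meta['_drop_reason']}")
    else:
        print(f"total_page: {meta['total_page']}")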

+ 21 - 0
magic_pdf/libs/detect_language_from_model.py

@@ -0,0 +1,21 @@
+from collections import Counter
+
+from magic_pdf.libs.language import detect_lang
+
+def get_language_from_model(model_list: list):
+    language_lst = []
+    for ocr_page_info in model_list:
+        page_text = ""
+        layout_dets = ocr_page_info["layout_dets"]
+        for layout_det in layout_dets:
+            category_id = layout_det["category_id"]
+            allow_category_id_list = [15]
+            if category_id in allow_category_id_list:
+                page_text += layout_det["text"]
+        page_language = detect_lang(page_text)
+        language_lst.append(page_language)
+    # count how many pages were detected as each language
+    count_dict = Counter(language_lst)
+    # return the most frequent language
+    language = max(count_dict, key=count_dict.get)
+    return language
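
The new helper takes a majority vote over pages: for each page it concatenates the text of category-15 detections (OCR text lines, per the allow-list above), detects that page's language, and returns the most frequent result. A usage sketch with a hand-built, hypothetical model_list:

    from magic_pdf.libs.detect_language_from_model import get_language_from_model

    model_list = [  # hypothetical layout-model output, one entry per page
        {"layout_dets": [
            {"category_id": 15, "text": "This page is mostly English text."},
            {"category_id": 1, "text": "ignored: not category 15"},
        ]},
        {"layout_dets": [
            {"category_id": 15, "text": "Another page of English text."},
        ]},
    ]

    language = get_language_from_model(model_list)  # e.g. "en"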

+ 1 - 1
magic_pdf/libs/drop_reason.py

@@ -8,7 +8,7 @@ class DropReason:
     HIGH_COMPUTATIONAL_lOAD_BY_SVGS = "high_computational_load_by_svgs" # special SVG images are too expensive to process, so the pdf is dropped
     HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES = "high_computational_load_by_total_pages" # computational load exceeds what the current method can afford
     MISS_DOC_LAYOUT_RESULT = "missing doc_layout_result" # layout analysis failed
-    Exception = "exception" # an exception occurred during parsing
+    Exception = "_exception" # an exception occurred during parsing
     ENCRYPTED = "encrypted" # the PDF is encrypted
     EMPTY_PDF = "total_page=0" # the PDF has zero pages
     NOT_IS_TEXT_PDF = "not_is_text_pdf" # not a text-based PDF, cannot be parsed directly
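
The underscore prefix applies uniformly to the trace keys written into result dicts throughout this commit (`_need_drop`, `_drop_reason`, `_exception`, `_pdf_type`, `_parse_type`). A minimal sketch of producing and consuming a drop record under the new convention:

    from magic_pdf.libs.drop_reason import DropReason

    result = {"_need_drop": True, "_drop_reason": DropReason.EMPTY_PDF}

    if result.get("_need_drop", False):
        print(f"dropped, reason: {result['_drop_reason']}")  # dropped, reason: total_page=0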

+ 11 - 11
magic_pdf/pdf_parse_by_txt.py

@@ -107,7 +107,7 @@ def parse_pdf_by_txt(
         # drop the dependency on junk-image detection to simplify the logic
         if len(page_imgs) > 1500:  # skip directly if the current page has more than 1500 images
             logger.warning(f"page_id: {page_id}, img_counts: {len(page_imgs)}, drop this pdf")
-            result = {"need_drop": True, "drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
+            result = {"_need_drop": True, "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
             if not debug_mode:
                 return result
 
@@ -236,7 +236,7 @@ def parse_pdf_by_txt(
         if is_text_block_horz_overlap:
             # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
             logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}")
-            result = {"need_drop": True, "drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
+            result = {"_need_drop": True, "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
             if not debug_mode:
                 return result
 
@@ -255,14 +255,14 @@ def parse_pdf_by_txt(
         
         if len(remain_text_blocks)>0 and len(all_bboxes)>0 and len(layout_bboxes)==0:
             logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}")
-            result = {"need_drop": True, "drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
+            result = {"_need_drop": True, "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
             if not debug_mode:
                 return result
 
         """以下去掉复杂的布局和超过2列的布局"""
         if any([lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]):  # 复杂的布局
             logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.COMPLICATED_LAYOUT}")
-            result = {"need_drop": True, "drop_reason": DropReason.COMPLICATED_LAYOUT}
+            result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
             if not debug_mode:
                 return result
 
@@ -270,8 +270,8 @@ def parse_pdf_by_txt(
         if layout_column_width > 2:  # drop pdfs whose layout has more than two columns
             logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}")
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
                 "extra_info": {"column_cnt": layout_column_width},
             }
             if not debug_mode:
@@ -377,23 +377,23 @@ def parse_pdf_by_txt(
         logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {error_info}")
         if error_info == denseSingleLineBlockException_msg:
             logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}")
-            result = {"need_drop": True, "drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
+            result = {"_need_drop": True, "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
             return result
         if error_info == titleDetectionException_msg:
             logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_DETECTION_FAILED}")
-            result = {"need_drop": True, "drop_reason": DropReason.TITLE_DETECTION_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_DETECTION_FAILED}
             return result
         elif error_info == titleLevelException_msg:
             logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_LEVEL_FAILED}")
-            result = {"need_drop": True, "drop_reason": DropReason.TITLE_LEVEL_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
             return result
         elif error_info == paraSplitException_msg:
             logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_SPLIT_FAILED}")
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_SPLIT_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
             return result
         elif error_info == paraMergeException_msg:
             logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_MERGE_FAILED}")
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_MERGE_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
             return result
 
     pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(pdf_info_dict)

+ 16 - 16
magic_pdf/pdf_parse_for_train.py

@@ -183,8 +183,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, img_counts: {img_counts}, drop this pdf: {book_name}, drop_reason: {DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
+                "_need_drop": True,
+                "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
             }
             if not debug_mode:
                 return result
@@ -396,8 +396,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
             }
             if not debug_mode:
                 return result
@@ -443,8 +443,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
+                "_need_drop": True,
+                "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
             }
             if not debug_mode:
                 return result
@@ -456,7 +456,7 @@ def parse_pdf_for_train(
             logger.warning(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.COMPLICATED_LAYOUT}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.COMPLICATED_LAYOUT}
+            result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
             if not debug_mode:
                 return result
 
@@ -466,8 +466,8 @@ def parse_pdf_for_train(
                 f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
                 "extra_info": {"column_cnt": layout_column_width},
             }
             if not debug_mode:
@@ -616,8 +616,8 @@ def parse_pdf_for_train(
                 f"Drop this pdf: {book_name}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
+                "_need_drop": True,
+                "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
             }
             return result
         if error_info == titleDetectionException_msg:
@@ -625,27 +625,27 @@ def parse_pdf_for_train(
                 f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_DETECTION_FAILED}"
             )
             result = {
-                "need_drop": True,
-                "drop_reason": DropReason.TITLE_DETECTION_FAILED,
+                "_need_drop": True,
+                "_drop_reason": DropReason.TITLE_DETECTION_FAILED,
             }
             return result
         elif error_info == titleLevelException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_LEVEL_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.TITLE_LEVEL_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
             return result
         elif error_info == paraSplitException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.PARA_SPLIT_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_SPLIT_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
             return result
         elif error_info == paraMergeException_msg:
             logger.warning(
                 f"Drop this pdf: {book_name}, reason: {DropReason.PARA_MERGE_FAILED}"
             )
-            result = {"need_drop": True, "drop_reason": DropReason.PARA_MERGE_FAILED}
+            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
             return result
 
     if debug_mode:

+ 26 - 26
magic_pdf/pipeline.bak

@@ -32,8 +32,8 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
         if (
             "doc_layout_result" not in jso
         ):  # check whether the json contains model data; skip this pdf if it does not
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.MISS_DOC_LAYOUT_RESULT
             return jso
     try:
         data_source = get_data_source(jso)
@@ -58,10 +58,10 @@ def meta_scan(jso: dict, doc_layout_check=True) -> dict:
         start_time = time.time()  # record start time
         res = pdf_meta_scan(s3_pdf_path, file_content)
         if res.get(
-            "need_drop", False
+            "_need_drop", False
         ):  # if the returned dict sets _need_drop, extract _drop_reason and skip this parse
-            jso["need_drop"] = True
-            jso["drop_reason"] = res["drop_reason"]
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = res["_drop_reason"]
         else:  # normal return
             jso["pdf_meta"] = res
             jso["content"] = ""
@@ -85,7 +85,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # main logic starts here
     try:
@@ -113,8 +113,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
         if (
             is_encrypted or is_needs_password
         ):  # encrypted, password-protected, and zero-page pdfs are all skipped
-            jso["need_drop"] = True
-            jso["drop_reason"] = DropReason.ENCRYPTED
+            jso["_need_drop"] = True
+            jso["_drop_reason"] = DropReason.ENCRYPTED
         else:
             start_time = time.time()  # record start time
             is_text_pdf, results = classify(
@@ -139,8 +139,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                 if (
                     text_language not in allow_language
                 ):  # drop if the language is not in the allowed set
-                    jso["need_drop"] = True
-                    jso["drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
+                    jso["_need_drop"] = True
+                    jso["_drop_reason"] = DropReason.NOT_ALLOW_LANGUAGE
                     return jso
             else:
                 # don't drop for now
@@ -148,8 +148,8 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
                 jso["_pdf_type"] = "OCR"
                 jso["pdf_meta"] = pdf_meta
                 jso["classify_time"] = classify_time
-                # jso["need_drop"] = True
-                # jso["drop_reason"] = DropReason.NOT_IS_TEXT_PDF
+                # jso["_need_drop"] = True
+                # jso["_drop_reason"] = DropReason.NOT_IS_TEXT_PDF
                 extra_info = {"classify_rules": []}
                 for condition, result in results.items():
                     if not result:
@@ -162,7 +162,7 @@ def classify_by_type(jso: dict, debug_mode=False) -> dict:
 
 
 def drop_needdrop_pdf(jso: dict) -> dict:
-    if jso.get("need_drop", False):
+    if jso.get("_need_drop", False):
         logger.info(
             f"book_name is:{get_data_source(jso)}/{jso['file_id']} need drop",
             file=sys.stderr,
@@ -176,7 +176,7 @@ def pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -203,7 +203,7 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # main logic starts here
     s3_pdf_path = jso.get("file_location")
@@ -220,8 +220,8 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
     max_svgs = max(svgs_per_page_list)
     if max_svgs > 3000:
-        jso["need_drop"] = True
-        jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+        jso["_need_drop"] = True
+        jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
     else:
         try:
             save_path = s3_image_save_path
@@ -244,10 +244,10 @@ def parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
                 debug_mode=debug_mode,
             )
             if pdf_info_dict.get(
-                "need_drop", False
+                "_need_drop", False
             ):  # if the returned dict sets _need_drop, extract _drop_reason and skip this parse
-                jso["need_drop"] = True
-                jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                jso["_need_drop"] = True
+                jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
             else:  # normal return: compress and store pdf_info_dict
                 pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)
                 jso["pdf_intermediate_dict"] = pdf_info_dict
@@ -269,7 +269,7 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     # main logic starts here
     s3_pdf_path = jso.get("file_location")
@@ -295,8 +295,8 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
     svgs_per_page_list = jso["pdf_meta"]["svgs_per_page"]
     max_svgs = max(svgs_per_page_list)
     if max_svgs > 3000:
-        jso["need_drop"] = True
-        jso["drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
+        jso["_need_drop"] = True
+        jso["_drop_reason"] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_SVGS
     # elif total_page > 1000:
     #     jso['need_drop'] = True
     #     jso['drop_reason'] = DropReason.HIGH_COMPUTATIONAL_lOAD_BY_TOTAL_PAGES
@@ -323,10 +323,10 @@ def parse_pdf_for_model_train(jso: dict, start_page_id=0, debug_mode=False) -> d
                 debug_mode=debug_mode,
             )
             if pdf_info_dict.get(
-                "need_drop", False
+                "_need_drop", False
             ):  # if the returned dict sets _need_drop, extract _drop_reason and skip this parse
-                jso["need_drop"] = True
-                jso["drop_reason"] = pdf_info_dict["drop_reason"]
+                jso["_need_drop"] = True
+                jso["_drop_reason"] = pdf_info_dict["_drop_reason"]
             else:  # normal return: compress and store pdf_info_dict
                 jso["parsed_results"] = convert_to_train_format(pdf_info_dict)
                 pdf_info_dict = JsonCompressor.compress_json(pdf_info_dict)

+ 9 - 9
magic_pdf/pipeline_ocr.bak

@@ -17,7 +17,7 @@ def ocr_pdf_intermediate_dict_to_markdown(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -45,7 +45,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para(jso: dict, mode, debug_mode=
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -78,7 +78,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_and_pagination(jso: dict, de
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -108,7 +108,7 @@ def ocr_pdf_intermediate_dict_to_markdown_with_para_for_qa(
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -137,7 +137,7 @@ def ocr_pdf_intermediate_dict_to_standard_format(jso: dict, debug_mode=False) ->
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -165,7 +165,7 @@ def ocr_pdf_intermediate_dict_to_standard_format_with_para(jso: dict, debug_mode
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop", file=sys.stderr)
             jso["dropped"] = True
@@ -221,7 +221,7 @@ def ocr_parse_pdf_core(pdf_bytes, model_output_json_list, book_name, start_page_
 
 # used to re-run pdfs that were dropped; after the run, the _need_drop flag must be reset to false
 def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
-    if not jso.get("need_drop", False):
+    if not jso.get("_need_drop", False):
         return jso
     else:
         try:
@@ -233,7 +233,7 @@ def ocr_dropped_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
             )
             jso["pdf_intermediate_dict"] = JsonCompressor.compress_json(pdf_info_dict)
             jso["parse_time"] = parse_time
-            jso["need_drop"] = False
+            jso["_need_drop"] = False
         except Exception as e:
             jso = exception_handler(jso, e)
         return jso
@@ -244,7 +244,7 @@ def ocr_parse_pdf(jso: dict, start_page_id=0, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             return jso
     try:
         pdf_bytes = get_pdf_bytes(jso)

+ 2 - 2
magic_pdf/pipeline_txt.bak

@@ -18,7 +18,7 @@ def txt_pdf_to_standard_format(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop")
             jso["dropped"] = True
@@ -46,7 +46,7 @@ def txt_pdf_to_mm_markdown_format(jso: dict, debug_mode=False) -> dict:
     if debug_mode:
         pass
     else:  # if debug mode is off, check for the _need_drop flag
-        if jso.get("need_drop", False):
+        if jso.get("_need_drop", False):
             book_name = join_path(get_data_source(jso), jso["file_id"])
             logger.info(f"book_name is:{book_name} need drop")
             jso["dropped"] = True

+ 1 - 1
magic_pdf/post_proc/pdf_post_filter.py

@@ -62,6 +62,6 @@ def pdf_post_filter(page_info) -> tuple:
     """
     bool_is_pseudo_single_column, extra_info = __is_pseudo_single_column(page_info)
     if bool_is_pseudo_single_column:
-        return False, {"need_drop": True, "drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.PSEUDO_SINGLE_COLUMN, "extra_info": extra_info}
 
     return True, None

+ 1 - 1
magic_pdf/pre_proc/pdf_pre_filter.py

@@ -68,7 +68,7 @@ def pdf_filter(page:fitz.Page, text_blocks, table_bboxes, image_bboxes) -> tuple
         
     """
     if __is_contain_color_background_rect(page, text_blocks, image_bboxes):
-        return False, {"need_drop": True, "drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}
+        return False, {"_need_drop": True, "_drop_reason": DropReason.COLOR_BACKGROUND_TEXT_BOX}
 
     
     return True, None

+ 81 - 0
magic_pdf/spark/UNIPipe.py

@@ -0,0 +1,81 @@
+from loguru import logger
+
+from magic_pdf.dict2md.mkcontent import mk_universal_format
+from magic_pdf.dict2md.ocr_mkcontent import make_standard_format_with_para
+from magic_pdf.filter.pdf_classify_by_type import classify
+from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
+from magic_pdf.libs.detect_language_from_model import get_language_from_model
+from magic_pdf.libs.drop_reason import DropReason
+from magic_pdf.libs.json_compressor import JsonCompressor
+from magic_pdf.spark.spark_api import parse_union_pdf, parse_ocr_pdf
+
+
+class UNIPipe:
+    def __init__(self):
+        pass
+
+    def classify(self, pdf_bytes: bytes) -> str:
+        """
+        Use the pdf metadata to decide whether this is a text pdf or an ocr pdf.
+        """
+        pdf_meta = pdf_meta_scan(pdf_bytes)
+        if pdf_meta.get("_need_drop", False):  # raise if the scan flags this pdf to be dropped
+            raise Exception(f"pdf meta_scan need_drop, reason is {pdf_meta['_drop_reason']}")
+        else:
+            is_encrypted = pdf_meta["is_encrypted"]
+            is_needs_password = pdf_meta["is_needs_password"]
+            if is_encrypted or is_needs_password:  # encrypted, password-protected, and zero-page pdfs are all skipped
+                raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.ENCRYPTED}")
+            else:
+                is_text_pdf, results = classify(
+                    pdf_meta["total_page"],
+                    pdf_meta["page_width_pts"],
+                    pdf_meta["page_height_pts"],
+                    pdf_meta["image_info_per_page"],
+                    pdf_meta["text_len_per_page"],
+                    pdf_meta["imgs_per_page"],
+                    pdf_meta["text_layout_per_page"],
+                )
+                if is_text_pdf:
+                    return "txt"
+                else:
+                    return "ocr"
+
+    def parse(self, pdf_bytes: bytes, image_writer, jso_useful_key) -> dict:
+        """
+        Parse the pdf according to its type.
+        """
+        text_language = get_language_from_model(jso_useful_key['model_list'])
+        allow_language = ["zh", "en"]  # allowed languages; currently only Simplified Chinese and English
+        logger.info(f"pdf text_language is {text_language}")
+        if text_language not in allow_language:  # drop if the language is not in the allowed set
+            raise Exception(f"pdf meta_scan need_drop, reason is {DropReason.NOT_ALLOW_LANGUAGE}")
+        else:
+            if jso_useful_key['_pdf_type'] == "txt":
+                pdf_mid_data = parse_union_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
+            elif jso_useful_key['_pdf_type'] == "ocr":
+                pdf_mid_data = parse_ocr_pdf(pdf_bytes, jso_useful_key['model_list'], image_writer)
+            else:
+                raise Exception("pdf type is not txt or ocr")
+            return JsonCompressor.compress_json(pdf_mid_data)
+
+    def mk_uni_format(self, pdf_mid_data: str, img_buket_path: str) -> list:
+        """
+        Generate the unified-format content_list according to the parse type.
+        """
+        pdf_mid_data = JsonCompressor.decompress_json(pdf_mid_data)
+        parse_type = pdf_mid_data["_parse_type"]
+        if parse_type == "txt":
+            content_list = mk_universal_format(pdf_mid_data, img_buket_path)
+        elif parse_type == "ocr":
+            content_list = make_standard_format_with_para(pdf_mid_data, img_buket_path)
+        return content_list
+
+
+if __name__ == '__main__':
+    # quick test
+    pipe = UNIPipe()
+    pdf_bytes = open(r"D:\project\20231108code-clean\magic_pdf\tmp\unittest\download-pdfs\数学新星网\edu_00001544.pdf",
+                     "rb").read()
+    pdf_type = pipe.classify(pdf_bytes)
+    logger.info(f"pdf_type is {pdf_type}")

+ 3 - 3
magic_pdf/spark/base.py

@@ -26,9 +26,9 @@ def get_bookid(jso: dict):
 
 def exception_handler(jso: dict, e):
     logger.exception(e)
-    jso["need_drop"] = True
-    jso["drop_reason"] = DropReason.Exception
-    jso["exception"] = f"ERROR: {e}"
+    jso["_need_drop"] = True
+    jso["_drop_reason"] = DropReason.Exception
+    jso["_exception"] = f"ERROR: {e}"
     return jso
 
 

+ 16 - 8
magic_pdf/spark/spark_api.py

@@ -36,7 +36,7 @@ def parse_txt_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter
     return pdf_info_dict
 
 
-def parse_ocr_pdf(pdf_bytes:bytes,  pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
+def parse_ocr_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0, *args, **kwargs):
     """
     Parse an ocr-type pdf.
     """
@@ -48,12 +48,12 @@ def parse_ocr_pdf(pdf_bytes:bytes,  pdf_models:list, imageWriter: AbsReaderWrite
         debug_mode=is_debug,
     )
 
-    pdf_info_dict["parse_type"] = "ocr"
+    pdf_info_dict["_parse_type"] = "ocr"
 
     return pdf_info_dict
 
 
-def parse_union_pdf(pdf_bytes:bytes,  pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0,  *args, **kwargs):
+def parse_union_pdf(pdf_bytes:bytes, pdf_models:list, imageWriter: AbsReaderWriter, is_debug=False, start_page=0,  *args, **kwargs):
     """
     For pdfs that mix ocr and text, parse everything.
     """
@@ -72,18 +72,26 @@ def parse_union_pdf(pdf_bytes:bytes,  pdf_models:list, imageWriter: AbsReaderWri
 
     pdf_info_dict = parse_pdf(parse_pdf_by_txt)
 
-    if pdf_info_dict is None or pdf_info_dict.get("need_drop", False):
+    if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
         logger.warning(f"parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr")
         pdf_info_dict = parse_pdf(parse_pdf_by_ocr)
         if pdf_info_dict is None:
             raise Exception("Both parse_pdf_by_txt and parse_pdf_by_ocr failed.")
         else:
-            pdf_info_dict["parse_type"] = "ocr"
+            pdf_info_dict["_parse_type"] = "ocr"
     else:
-        pdf_info_dict["parse_type"] = "txt"
+        pdf_info_dict["_parse_type"] = "txt"
 
     return pdf_info_dict
 
 
-def spark_json_extractor(jso:dict):
-    pass
+def spark_json_extractor(jso: dict) -> dict:
+    """
+    Extract the useful fields from the json and return them as a dict.
+    """
+    return {
+        "_pdf_type": jso["_pdf_type"],
+        "model_list": jso["doc_layout_result"],
+    }
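
The extractor returns exactly the jso_useful_key shape that UNIPipe.parse consumes, so a spark job can bridge the two directly. A minimal wiring sketch (jso is a hypothetical record that already carries _pdf_type and doc_layout_result; pdf_bytes and image_writer come from upstream):

    from magic_pdf.spark.spark_api import spark_json_extractor
    from magic_pdf.spark.UNIPipe import UNIPipe

    jso_useful_key = spark_json_extractor(jso)  # {"_pdf_type": ..., "model_list": ...}
    pdf_mid_data = UNIPipe().parse(pdf_bytes, image_writer, jso_useful_key)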