
Merge pull request #1252 from myhloli/dev

fix(detect_invalid_chars): fix the stack error caused by multiple memory releases in PyMuPDF
Xiaomeng Zhao 11 months ago
parent
commit
fdf15a45d6

+ 3 - 2
magic_pdf/filter/pdf_meta_scan.py

@@ -8,7 +8,7 @@ from loguru import logger
 from magic_pdf.config.drop_reason import DropReason
 from magic_pdf.libs.commons import get_top_percent_list, mymax
 from magic_pdf.libs.language import detect_lang
-from magic_pdf.libs.pdf_check import detect_invalid_chars_by_pymupdf
+from magic_pdf.libs.pdf_check import detect_invalid_chars_by_pymupdf, detect_invalid_chars
 
 
 scan_max_page = 50
 junk_limit_min = 10
@@ -323,7 +323,8 @@ def get_language(doc: fitz.Document):
 
 
 def check_invalid_chars(pdf_bytes):
     """Garbled-text detection."""
-    return detect_invalid_chars_by_pymupdf(pdf_bytes)
+    # return detect_invalid_chars_by_pymupdf(pdf_bytes)
+    return detect_invalid_chars(pdf_bytes)
 
 
 
 
 def pdf_meta_scan(pdf_bytes: bytes):
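The hunk above routes check_invalid_chars through the pdfminer-based detect_invalid_chars instead of the PyMuPDF path, sidestepping the stack error caused by multiple memory releases in PyMuPDF. A minimal usage sketch of the public check (the file path is a placeholder, not part of this PR):

# Minimal sketch: run the garbled-text check on a local PDF.
# "sample.pdf" is a placeholder path, not part of this change.
from magic_pdf.filter.pdf_meta_scan import check_invalid_chars

with open("sample.pdf", "rb") as f:
    pdf_bytes = f.read()

# True  -> the text layer extracts normally
# False -> more than 5% of the extracted text is (cid:xxx) garbage
print(check_invalid_chars(pdf_bytes))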

+ 30 - 30
magic_pdf/libs/pdf_check.py

@@ -1,9 +1,9 @@
 import fitz
 import numpy as np
 from loguru import logger
-# import re
-# from io import BytesIO
-# from pdfminer.high_level import extract_text
+import re
+from io import BytesIO
+from pdfminer.high_level import extract_text
 
 
 
 
 def calculate_sample_count(total_page: int):
@@ -33,33 +33,33 @@ def extract_pages(src_pdf_bytes: bytes) -> fitz.Document:
     return sample_docs
 
 
 
 
-# def detect_invalid_chars(src_pdf_bytes: bytes) -> bool:
-#     """
-#     Detect whether the PDF contains invalid characters
-#     """
-#     '''pdfminer is slow, so randomly sample about 10 pages first'''
-#     sample_docs = extract_pages(src_pdf_bytes)
-#     sample_pdf_bytes = sample_docs.tobytes()
-#     sample_pdf_file_like_object = BytesIO(sample_pdf_bytes)
-#     text = extract_text(sample_pdf_file_like_object)
-#     text = text.replace("\n", "")
-#     # logger.info(text)
-#     '''garbled text extracted by pdfminer shows up as (cid:xxx)'''
-#     cid_pattern = re.compile(r'\(cid:\d+\)')
-#     matches = cid_pattern.findall(text)
-#     cid_count = len(matches)
-#     cid_len = sum(len(match) for match in matches)
-#     text_len = len(text)
-#     if text_len == 0:
-#         cid_chars_radio = 0
-#     else:
-#         cid_chars_radio = cid_count/(cid_count + text_len - cid_len)
-#     logger.info(f"cid_count: {cid_count}, text_len: {text_len}, cid_chars_radio: {cid_chars_radio}")
-#     '''if more than 5% of a document's text is garbled, treat the document as garbled'''
-#     if cid_chars_radio > 0.05:
-#         return False  # garbled document
-#     else:
-#         return True   # normal document
+def detect_invalid_chars(src_pdf_bytes: bytes) -> bool:
+    """
+    Detect whether the PDF contains invalid characters
+    """
+    '''pdfminer is slow, so randomly sample about 10 pages first'''
+    sample_docs = extract_pages(src_pdf_bytes)
+    sample_pdf_bytes = sample_docs.tobytes()
+    sample_pdf_file_like_object = BytesIO(sample_pdf_bytes)
+    text = extract_text(sample_pdf_file_like_object)
+    text = text.replace("\n", "")
+    # logger.info(text)
+    '''garbled text extracted by pdfminer shows up as (cid:xxx)'''
+    cid_pattern = re.compile(r'\(cid:\d+\)')
+    matches = cid_pattern.findall(text)
+    cid_count = len(matches)
+    cid_len = sum(len(match) for match in matches)
+    text_len = len(text)
+    if text_len == 0:
+        cid_chars_radio = 0
+    else:
+        cid_chars_radio = cid_count/(cid_count + text_len - cid_len)
+    logger.info(f"cid_count: {cid_count}, text_len: {text_len}, cid_chars_radio: {cid_chars_radio}")
+    '''if more than 5% of a document's text is garbled, treat the document as garbled'''
+    if cid_chars_radio > 0.05:
+        return False  # garbled document
+    else:
+        return True   # normal document
 
 
 
 
 def count_replacement_characters(text: str) -> int:
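For reference, a standalone sketch of the (cid:xxx) ratio used in detect_invalid_chars above; the input string is made up for illustration:

# Minimal sketch of the cid-ratio heuristic on a made-up string.
import re

text = "Normal words (cid:12)(cid:345) more words"
matches = re.findall(r'\(cid:\d+\)', text)
cid_count = len(matches)                # 2 garbled tokens
cid_len = sum(len(m) for m in matches)  # 8 + 9 = 17 characters of (cid:...) markup
text_len = len(text)                    # 41
# Each (cid:xxx) token counts as one bad character against the remaining real text.
ratio = cid_count / (cid_count + text_len - cid_len)
print(ratio)  # ~0.077, above the 0.05 threshold, so this sample would be flagged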

+ 4 - 0
magic_pdf/model/doc_analyze_by_custom_model.py

@@ -5,6 +5,10 @@ import fitz
 import numpy as np
 from loguru import logger
 
 
+# disable paddle's signal handling
+import paddle
+paddle.disable_signal_handler()
+
 os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # prevent albumentations from checking for updates
 os.environ['YOLO_VERBOSE'] = 'False'  # disable yolo logger
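PaddlePaddle registers native signal handlers at import time, and the added paddle.disable_signal_handler() call removes them before any PDF processing starts. A sketch of the same guard with an optional-import fallback (the try/except is an assumption for environments where paddle may be absent; the PR itself imports paddle unconditionally):

# Minimal sketch: turn off Paddle's signal handlers as early as possible.
# The ImportError fallback is an assumption, not part of this change.
try:
    import paddle
    paddle.disable_signal_handler()  # Paddle API, as used in the diff above
except ImportError:
    paddle = None  # paddle not installed; nothing to disable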
 
 

+ 1 - 1
requirements.txt

@@ -9,5 +9,5 @@ PyMuPDF>=1.24.9
 scikit-learn>=1.0.2
 torch>=2.2.2
 transformers
-# pdfminer.six==20231228
+pdfminer.six==20231228
 # The requirements.txt must ensure that only necessary external dependencies are introduced. If there are new dependencies to add, please contact the project administrator.
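Since the pdfminer.six pin is now a hard dependency rather than a comment, a quick runtime check (not part of the PR) can confirm the installed build matches the pin:

# Minimal sketch: assert the installed pdfminer.six matches the pinned build.
from importlib.metadata import version

installed = version("pdfminer.six")
assert installed == "20231228", f"unexpected pdfminer.six version: {installed}"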