# pdf_parse_union_core_v2.py
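"""
Core of the "union" PDF parsing pipeline (v2) in magic_pdf: it takes the raw PDF bytes plus the
per-page layout-model output, builds spans and blocks for every page (in either "txt" or "ocr"
parse mode), sorts lines into reading order with a layoutreader model, and assembles the final
pdf_info dict. (Summary derived from the code below.)
"""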

import statistics
import time
from loguru import logger
from typing import List
import torch
from magic_pdf.libs.commons import fitz, get_delta_time
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.drop_reason import DropReason
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.local_math import float_equal
from magic_pdf.libs.ocr_content_type import ContentType
from magic_pdf.model.magic_model import MagicModel
from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
from magic_pdf.pre_proc.construct_page_dict import ocr_construct_page_component_v2
from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
from magic_pdf.pre_proc.equations_replace import remove_chars_in_text_blocks, replace_equations_in_textblock, \
    combine_chars_to_pymudict
from magic_pdf.pre_proc.ocr_detect_all_bboxes import ocr_prepare_bboxes_for_layout_split_v2
from magic_pdf.pre_proc.ocr_dict_merge import fill_spans_in_blocks, fix_block_spans, fix_discarded_block
from magic_pdf.pre_proc.ocr_span_list_modify import remove_overlaps_min_spans, get_qa_need_list_v2, \
    remove_overlaps_low_confidence_spans
from magic_pdf.pre_proc.resolve_bbox_conflict import check_useful_block_horizontal_overlap

def remove_horizontal_overlap_block_which_smaller(all_bboxes):
    useful_blocks = []
    for bbox in all_bboxes:
        useful_blocks.append({
            "bbox": bbox[:4]
        })
    is_useful_block_horz_overlap, smaller_bbox, bigger_bbox = check_useful_block_horizontal_overlap(useful_blocks)
    if is_useful_block_horz_overlap:
        logger.warning(
            f"skip this page, reason: {DropReason.USEFUL_BLOCK_HOR_OVERLAP}, smaller bbox is {smaller_bbox}, bigger bbox is {bigger_bbox}")
        for bbox in all_bboxes.copy():
            if smaller_bbox == bbox[:4]:
                all_bboxes.remove(bbox)
    return is_useful_block_horz_overlap, all_bboxes

def __replace_STX_ETX(text_str: str):
    """Replace \u0002 and \u0003: these characters come out garbled when text is extracted with pymupdf,
    while the original characters were quotation marks.
    Note: so far this issue has only been observed in English text; it has not appeared in Chinese text.

    Args:
        text_str (str): raw text

    Returns:
        _type_: replaced text
    """
    if text_str:
        s = text_str.replace('\u0002', "'")
        s = s.replace("\u0003", "'")
        return s
    return text_str
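
# txt_spans_extract pulls text spans directly from the PDF text layer via pymupdf ("dict" and
# "rawdict" extractions are combined down to char level), substitutes the detected inline and
# interline equations back into the text blocks, strips citation markers, and emits plain text
# spans with a fixed score of 1.0; zero-width or zero-height spans are skipped.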
def txt_spans_extract(pdf_page, inline_equations, interline_equations):
    text_raw_blocks = pdf_page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"]
    char_level_text_blocks = pdf_page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)[
        "blocks"
    ]
    text_blocks = combine_chars_to_pymudict(text_raw_blocks, char_level_text_blocks)
    text_blocks = replace_equations_in_textblock(
        text_blocks, inline_equations, interline_equations
    )
    text_blocks = remove_citation_marker(text_blocks)
    text_blocks = remove_chars_in_text_blocks(text_blocks)
    spans = []
    for v in text_blocks:
        for line in v["lines"]:
            for span in line["spans"]:
                bbox = span["bbox"]
                if float_equal(bbox[0], bbox[2]) or float_equal(bbox[1], bbox[3]):
                    continue
                if span.get('type') not in (ContentType.InlineEquation, ContentType.InterlineEquation):
                    spans.append(
                        {
                            "bbox": list(span["bbox"]),
                            "content": __replace_STX_ETX(span["text"]),
                            "type": ContentType.Text,
                            "score": 1.0,
                        }
                    )
    return spans
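
# In "txt" parse mode the text spans produced by OCR are dropped and replaced by the spans
# extracted from the PDF text layer; non-text spans (images, tables, equations) are kept as-is.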
def replace_text_span(pymu_spans, ocr_spans):
    return list(filter(lambda x: x["type"] != ContentType.Text, ocr_spans)) + pymu_spans

def model_init(model_name: str, local_path=None):
    from transformers import LayoutLMv3ForTokenClassification
    if torch.cuda.is_available():
        device = torch.device("cuda")
        if torch.cuda.is_bf16_supported():
            supports_bfloat16 = True
        else:
            supports_bfloat16 = False
    else:
        device = torch.device("cpu")
        supports_bfloat16 = False
    if model_name == "layoutreader":
        if local_path:
            model = LayoutLMv3ForTokenClassification.from_pretrained(local_path)
        else:
            model = LayoutLMv3ForTokenClassification.from_pretrained("hantian/layoutreader")
        # Check whether the device supports bfloat16
        if supports_bfloat16:
            model.bfloat16()
        model.to(device).eval()
    else:
        logger.error("model name not allowed")
        exit(1)
    return model
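
# ModelSingleton caches loaded models in a process-wide singleton so the layoutreader model is
# initialized only once and then reused for every page.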
class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, model_name: str, local_path=None):
        if model_name not in self._models:
            if local_path:
                self._models[model_name] = model_init(model_name=model_name, local_path=local_path)
            else:
                self._models[model_name] = model_init(model_name=model_name)
        return self._models[model_name]
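
# do_predict runs the layoutreader model on a list of boxes (already scaled to the 0-1000 grid by
# the caller) and returns the predicted reading-order permutation of those boxes.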
def do_predict(boxes: List[List[int]], model) -> List[int]:
    from magic_pdf.model.v3 import prepare_inputs, boxes2inputs, parse_logits
    inputs = boxes2inputs(boxes)
    inputs = prepare_inputs(inputs, model)
    logits = model(**inputs).logits.cpu().squeeze(0)
    return parse_logits(logits, len(boxes))
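
# cal_block_index assigns each block a reading-order index: text-like blocks take the median index
# of their lines (or their own bbox position when they have no lines), while tables and images take
# the position of their bbox in the sorted list.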
def cal_block_index(fix_blocks, sorted_bboxes):
    for block in fix_blocks:
        if block['type'] in ['text', 'title', 'interline_equation']:
            line_index_list = []
            if len(block['lines']) == 0:
                block['index'] = sorted_bboxes.index(block['bbox'])
            else:
                for line in block['lines']:
                    line['index'] = sorted_bboxes.index(line['bbox'])
                    line_index_list.append(line['index'])
                median_value = statistics.median(line_index_list)
                block['index'] = median_value
        elif block['type'] in ['table', 'image']:
            block['index'] = sorted_bboxes.index(block['bbox'])
    return fix_blocks
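
# sort_lines_by_model collects one bbox per line (tables and images count as a single line each),
# clamps the boxes to the page, scales them to layoutreader's 0-1000 coordinate grid, and asks the
# model for the reading order of those boxes.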
def sort_lines_by_model(fix_blocks, page_w, page_h):
    page_line_list = []
    for block in fix_blocks:
        if block['type'] in ['text', 'title', 'interline_equation']:
            if len(block['lines']) == 0:  # Blocks without lines (usually text rendered as an image) are sorted by the block bbox directly
                bbox = block['bbox']
                page_line_list.append(bbox)
            else:
                for line in block['lines']:
                    bbox = line['bbox']
                    page_line_list.append(bbox)
        elif block['type'] in ['table', 'image']:  # Simply treat tables and images as a single line each
            bbox = block['bbox']
            page_line_list.append(bbox)

    # Sort with layoutreader
    x_scale = 1000.0 / page_w
    y_scale = 1000.0 / page_h
    boxes = []
    # logger.info(f"Scale: {x_scale}, {y_scale}, Boxes len: {len(page_line_list)}")
    for left, top, right, bottom in page_line_list:
        if left < 0:
            logger.warning(
                f"left < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
            left = 0
        if right > page_w:
            logger.warning(
                f"right > page_w, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
            right = page_w
        if top < 0:
            logger.warning(
                f"top < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
            top = 0
        if bottom > page_h:
            logger.warning(
                f"bottom > page_h, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}")
            bottom = page_h
        left = round(left * x_scale)
        top = round(top * y_scale)
        right = round(right * x_scale)
        bottom = round(bottom * y_scale)
        assert (
            1000 >= right >= left >= 0 and 1000 >= bottom >= top >= 0
        ), f"Invalid box. right: {right}, left: {left}, bottom: {bottom}, top: {top}"
        boxes.append([left, top, right, bottom])
    model_manager = ModelSingleton()
    model = model_manager.get_model("layoutreader")
    with torch.no_grad():
        orders = do_predict(boxes, model)
    sorted_bboxes = [page_line_list[i] for i in orders]
    return sorted_bboxes
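
# parse_page_core parses a single page: it gathers blocks and spans from the magic_model, optionally
# swaps OCR text spans for PDF text-layer spans (txt mode), de-duplicates overlapping spans, crops
# images and tables, fills spans into blocks, sorts everything into reading order, and returns the
# page component dict.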
def parse_page_core(pdf_docs, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode):
    need_drop = False
    drop_reason = []

    '''Get the block information needed later from the magic_model object'''
    img_blocks = magic_model.get_imgs(page_id)
    table_blocks = magic_model.get_tables(page_id)
    discarded_blocks = magic_model.get_discarded(page_id)
    text_blocks = magic_model.get_text_blocks(page_id)
    title_blocks = magic_model.get_title_blocks(page_id)
    inline_equations, interline_equations, interline_equation_blocks = magic_model.get_equations(page_id)
    page_w, page_h = magic_model.get_page_size(page_id)
    spans = magic_model.get_all_spans(page_id)

    '''Build spans according to parse_mode'''
    if parse_mode == "txt":
        """In txt mode, text-type spans from OCR are replaced with pymu spans"""
        pymu_spans = txt_spans_extract(
            pdf_docs[page_id], inline_equations, interline_equations
        )
        spans = replace_text_span(pymu_spans, spans)
    elif parse_mode == "ocr":
        pass
    else:
        raise Exception("parse_mode must be txt or ocr")

    '''Among overlapping spans, drop the ones with lower confidence'''
    spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
    '''Among overlapping spans, drop the smaller ones'''
    spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)

    '''Crop image and table regions'''
    spans = ocr_cut_image_and_table(spans, pdf_docs[page_id], page_id, pdf_bytes_md5, imageWriter)

    '''Gather the bboxes of all blocks'''
    # The interline_equation_blocks parameter is not accurate enough; switch to interline_equations later
    interline_equation_blocks = []
    if len(interline_equation_blocks) > 0:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_blocks, table_blocks, discarded_blocks, text_blocks, title_blocks,
            interline_equation_blocks, page_w, page_h)
    else:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_blocks, table_blocks, discarded_blocks, text_blocks, title_blocks,
            interline_equations, page_w, page_h)

    '''First handle the discarded_blocks, which need no layout'''
    discarded_block_with_spans, spans = fill_spans_in_blocks(all_discarded_blocks, spans, 0.4)
    fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)

    '''Skip this page if it has no bbox'''
    if len(all_bboxes) == 0:
        logger.warning(f"skip this page, not found useful bbox, page_id: {page_id}")
        return ocr_construct_page_component_v2([], [], page_id, page_w, page_h, [],
                                               [], [], interline_equations, fix_discarded_blocks,
                                               need_drop, drop_reason)

    '''Fill spans into the blocks'''
    block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.3)

    '''Fix the blocks'''
    fix_blocks = fix_block_spans(block_with_spans, img_blocks, table_blocks)

    '''Collect all lines and sort them'''
    sorted_bboxes = sort_lines_by_model(fix_blocks, page_w, page_h)

    '''Compute block ordering from the median line index'''
    fix_blocks = cal_block_index(fix_blocks, sorted_bboxes)

    '''Re-order the blocks'''
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])

    '''Get the lists that need to be exposed for QA'''
    images, tables, interline_equations = get_qa_need_list_v2(sorted_blocks)

    '''Build pdf_info_dict'''
    page_info = ocr_construct_page_component_v2(sorted_blocks, [], page_id, page_w, page_h, [],
                                                images, tables, interline_equations, fix_discarded_blocks,
                                                need_drop, drop_reason)
    return page_info

def clean_memory():
    import gc
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    gc.collect()
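
# pdf_parse_union is the top-level entry point: it opens the PDF from bytes, wraps the model output
# in a MagicModel, parses every page in [start_page_id, end_page_id] with parse_page_core (pages
# outside the range get an empty "skip page" component), then flattens the per-page dict into the
# final {"pdf_info": [...]} structure.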
def pdf_parse_union(pdf_bytes,
                    model_list,
                    imageWriter,
                    parse_mode,
                    start_page_id=0,
                    end_page_id=None,
                    debug_mode=False,
                    ):
    pdf_bytes_md5 = compute_md5(pdf_bytes)
    pdf_docs = fitz.open("pdf", pdf_bytes)

    '''Initialize an empty pdf_info_dict'''
    pdf_info_dict = {}

    '''Initialize magic_model with model_list and the docs object'''
    magic_model = MagicModel(model_list, pdf_docs)

    '''Parse the pdf over the requested page range'''
    # end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
    end_page_id = end_page_id if end_page_id is not None and end_page_id >= 0 else len(pdf_docs) - 1
    if end_page_id > len(pdf_docs) - 1:
        logger.warning("end_page_id is out of range, use pdf_docs length")
        end_page_id = len(pdf_docs) - 1

    '''Record the start time'''
    start_time = time.time()

    for page_id, page in enumerate(pdf_docs):
        '''In debug mode, log how long each page took to parse'''
        if debug_mode:
            time_now = time.time()
            logger.info(
                f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}"
            )
            start_time = time_now

        '''Parse each page of the pdf'''
        if start_page_id <= page_id <= end_page_id:
            page_info = parse_page_core(pdf_docs, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode)
        else:
            page_w = page.rect.width
            page_h = page.rect.height
            page_info = ocr_construct_page_component_v2([], [], page_id, page_w, page_h, [],
                                                        [], [], [], [],
                                                        True, "skip page")
        pdf_info_dict[f"page_{page_id}"] = page_info

    """Paragraph splitting"""
    # para_split(pdf_info_dict, debug_mode=debug_mode)
    for page_num, page in pdf_info_dict.items():
        page['para_blocks'] = page['preproc_blocks']

    """Convert the dict to a list"""
    pdf_info_list = dict_to_list(pdf_info_dict)
    new_pdf_info_dict = {
        "pdf_info": pdf_info_list,
    }

    clean_memory()

    return new_pdf_info_dict

if __name__ == '__main__':
    pass
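
# A minimal usage sketch (assumptions: `run_layout_model` and `make_image_writer` are hypothetical
# placeholders for whatever produces the per-page layout-model result list and the image writer
# object in the surrounding magic_pdf/MinerU setup; their real counterparts depend on the installed
# version):
#
#     with open("example.pdf", "rb") as f:
#         pdf_bytes = f.read()
#     model_list = run_layout_model(pdf_bytes)        # hypothetical: per-page model output
#     image_writer = make_image_writer("./images")    # hypothetical: consumed by ocr_cut_image_and_table
#     result = pdf_parse_union(pdf_bytes, model_list, image_writer,
#                              parse_mode="ocr", debug_mode=True)
#     print(len(result["pdf_info"]))                  # number of parsed pages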