model_json_to_middle_json.py

# Copyright (c) Opendatalab. All rights reserved.
import time

from loguru import logger

from mineru.backend.pipeline.config_reader import get_device, get_llm_aided_config
from mineru.backend.pipeline.model_init import AtomModelSingleton
from mineru.backend.pipeline.para_split import para_split
from mineru.utils.block_pre_proc import prepare_block_bboxes, process_groups
from mineru.utils.block_sort import sort_blocks_by_bbox
from mineru.utils.cut_image import cut_image_and_table
from mineru.utils.llm_aided import llm_aided_title
from mineru.utils.model_utils import clean_memory
from mineru.utils.pipeline_magic_model import MagicModel
from mineru.utils.span_block_fix import fill_spans_in_blocks, fix_discarded_block, fix_block_spans
from mineru.utils.span_pre_proc import remove_outside_spans, remove_overlaps_low_confidence_spans, \
    remove_overlaps_min_spans, txt_spans_extract_v3
from mineru.version import __version__
from mineru.utils.hash_utils import str_md5


def page_model_info_to_page_info(page_model_info, image_dict, page, image_writer, page_index, ocr=False):
    scale = image_dict["scale"]
    page_pil_img = image_dict["img_pil"]
    page_img_md5 = str_md5(image_dict["img_base64"])
    page_w, page_h = map(int, page.get_size())
    magic_model = MagicModel(page_model_info, scale)
  24. """从magic_model对象中获取后面会用到的区块信息"""
  25. img_groups = magic_model.get_imgs()
  26. table_groups = magic_model.get_tables()
  27. """对image和table的区块分组"""
  28. img_body_blocks, img_caption_blocks, img_footnote_blocks = process_groups(
  29. img_groups, 'image_body', 'image_caption_list', 'image_footnote_list'
  30. )
  31. table_body_blocks, table_caption_blocks, table_footnote_blocks = process_groups(
  32. table_groups, 'table_body', 'table_caption_list', 'table_footnote_list'
  33. )
    discarded_blocks = magic_model.get_discarded()
    text_blocks = magic_model.get_text_blocks()
    title_blocks = magic_model.get_title_blocks()
    inline_equations, interline_equations, interline_equation_blocks = magic_model.get_equations()
  38. """将所有区块的bbox整理到一起"""
  39. interline_equation_blocks = []
  40. if len(interline_equation_blocks) > 0:
  41. all_bboxes, all_discarded_blocks, footnote_blocks = prepare_block_bboxes(
  42. img_body_blocks, img_caption_blocks, img_footnote_blocks,
  43. table_body_blocks, table_caption_blocks, table_footnote_blocks,
  44. discarded_blocks,
  45. text_blocks,
  46. title_blocks,
  47. interline_equation_blocks,
  48. page_w,
  49. page_h,
  50. )
  51. else:
  52. all_bboxes, all_discarded_blocks, footnote_blocks = prepare_block_bboxes(
  53. img_body_blocks, img_caption_blocks, img_footnote_blocks,
  54. table_body_blocks, table_caption_blocks, table_footnote_blocks,
  55. discarded_blocks,
  56. text_blocks,
  57. title_blocks,
  58. interline_equations,
  59. page_w,
  60. page_h,
  61. )
  62. """获取所有的spans信息"""
  63. spans = magic_model.get_all_spans()
  64. """在删除重复span之前,应该通过image_body和table_body的block过滤一下image和table的span"""
  65. """顺便删除大水印并保留abandon的span"""
  66. spans = remove_outside_spans(spans, all_bboxes, all_discarded_blocks)
  67. """删除重叠spans中置信度较低的那些"""
  68. spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
  69. """删除重叠spans中较小的那些"""
  70. spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)
  71. """根据parse_mode,构造spans,主要是文本类的字符填充"""
  72. if ocr:
  73. pass
  74. else:
  75. """使用新版本的混合ocr方案."""
  76. spans = txt_spans_extract_v3(page, spans, page_pil_img, scale, all_bboxes, all_discarded_blocks)
  77. """先处理不需要排版的discarded_blocks"""
  78. discarded_block_with_spans, spans = fill_spans_in_blocks(
  79. all_discarded_blocks, spans, 0.4
  80. )
  81. fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)
  82. """如果当前页面没有有效的bbox则跳过"""
  83. if len(all_bboxes) == 0:
  84. return None
  85. """对image和table截图"""
  86. for span in spans:
  87. if span['type'] in ['image', 'table']:
  88. span = cut_image_and_table(
  89. span, page_pil_img, page_img_md5, page_index, image_writer, scale=scale
  90. )
  91. """span填充进block"""
  92. block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.5)
  93. """对block进行fix操作"""
  94. fix_blocks = fix_block_spans(block_with_spans)
  95. """同一行被断开的titile合并"""
  96. # merge_title_blocks(fix_blocks)
  97. """对block进行排序"""
  98. sorted_blocks = sort_blocks_by_bbox(fix_blocks, page_w, page_h, footnote_blocks)
  99. """构造page_info"""
  100. page_info = make_page_info_dict(sorted_blocks, page_index, page_w, page_h, fix_discarded_blocks)
  101. return page_info


def result_to_middle_json(model_list, images_list, pdf_doc, image_writer, lang=None, ocr=False):
    middle_json = {"pdf_info": [], "_backend": "pipeline", "_version_name": __version__}
    for page_index, page_model_info in enumerate(model_list):
        page = pdf_doc[page_index]
        image_dict = images_list[page_index]
        page_info = page_model_info_to_page_info(
            page_model_info, image_dict, page, image_writer, page_index, ocr=ocr
        )
        if page_info is None:
            page_w, page_h = map(int, page.get_size())
            page_info = make_page_info_dict([], page_index, page_w, page_h, [])
        middle_json["pdf_info"].append(page_info)
  114. """后置ocr处理"""
    need_ocr_list = []
    img_crop_list = []
    text_block_list = []
    for page_info in middle_json["pdf_info"]:
        for block in page_info['preproc_blocks']:
            if block['type'] in ['table', 'image']:
                for sub_block in block['blocks']:
                    if sub_block['type'] in ['image_caption', 'image_footnote', 'table_caption', 'table_footnote']:
                        text_block_list.append(sub_block)
            elif block['type'] in ['text', 'title']:
                text_block_list.append(block)
        for block in page_info['discarded_blocks']:
            text_block_list.append(block)

    # Collect every span that still carries a cropped image (np_img) and therefore needs OCR.
    for block in text_block_list:
        for line in block['lines']:
            for span in line['spans']:
                if 'np_img' in span:
                    need_ocr_list.append(span)
                    img_crop_list.append(span['np_img'])
                    span.pop('np_img')

    if len(img_crop_list) > 0:
        atom_model_manager = AtomModelSingleton()
        ocr_model = atom_model_manager.get_atom_model(
            atom_model_name='ocr',
            ocr_show_log=False,
            det_db_box_thresh=0.3,
            lang=lang
        )
        # Recognition only (det=False): the crops are already isolated text regions.
        ocr_res_list = ocr_model.ocr(img_crop_list, det=False, tqdm_enable=True)[0]
        assert len(ocr_res_list) == len(
            need_ocr_list), f'ocr_res_list: {len(ocr_res_list)}, need_ocr_list: {len(need_ocr_list)}'
        for index, span in enumerate(need_ocr_list):
            ocr_text, ocr_score = ocr_res_list[index]
            span['content'] = ocr_text
            span['score'] = float(f"{ocr_score:.3f}")
  150. """分段"""
  151. para_split(middle_json["pdf_info"])
  152. """llm优化"""
  153. llm_aided_config = get_llm_aided_config()
  154. if llm_aided_config is not None:
  155. """标题优化"""
  156. title_aided_config = llm_aided_config.get('title_aided', None)
  157. if title_aided_config is not None:
  158. if title_aided_config.get('enable', False):
  159. llm_aided_title_start_time = time.time()
  160. llm_aided_title(middle_json["pdf_info"], title_aided_config)
  161. logger.info(f'llm aided title time: {round(time.time() - llm_aided_title_start_time, 2)}')
  162. clean_memory(get_device())
  163. return middle_json


def make_page_info_dict(blocks, page_id, page_w, page_h, discarded_blocks):
    return_dict = {
        'preproc_blocks': blocks,
        'page_idx': page_id,
        'page_size': [page_w, page_h],
        'discarded_blocks': discarded_blocks,
    }
    return return_dict
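

# Illustrative usage sketch (not part of the module): how result_to_middle_json might be
# driven by a caller. The names load_pdf_doc, render_page_images, run_layout_models and
# DiskImageWriter below are hypothetical placeholders, not MinerU APIs; in the real
# pipeline the inference step supplies model_list, images_list, pdf_doc and image_writer.
#
#     pdf_doc = load_pdf_doc("paper.pdf")              # hypothetical: page objects exposing get_size()
#     images_list = render_page_images(pdf_doc)        # hypothetical: [{"img_pil", "img_base64", "scale"}, ...]
#     model_list = run_layout_models(images_list)      # hypothetical: per-page layout model output
#     image_writer = DiskImageWriter("output/images")  # hypothetical: writer used by cut_image_and_table
#
#     middle_json = result_to_middle_json(
#         model_list, images_list, pdf_doc, image_writer, lang="en", ocr=False
#     )
#     print(len(middle_json["pdf_info"]), "pages converted to middle JSON")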