# model_json_to_middle_json.py
  1. # Copyright (c) Opendatalab. All rights reserved.
  2. import time
  3. from loguru import logger
  4. from mineru.utils.config_reader import get_device, get_llm_aided_config
  5. from mineru.backend.pipeline.model_init import AtomModelSingleton
  6. from mineru.backend.pipeline.para_split import para_split
  7. from mineru.utils.block_pre_proc import prepare_block_bboxes, process_groups
  8. from mineru.utils.block_sort import sort_blocks_by_bbox
  9. from mineru.utils.boxbase import calculate_overlap_area_in_bbox1_area_ratio
  10. from mineru.utils.cut_image import cut_image_and_table
  11. from mineru.utils.enum_class import ContentType
  12. from mineru.utils.llm_aided import llm_aided_title
  13. from mineru.utils.model_utils import clean_memory
  14. from mineru.backend.pipeline.pipeline_magic_model import MagicModel
  15. from mineru.utils.ocr_utils import OcrConfidence
  16. from mineru.utils.span_block_fix import fill_spans_in_blocks, fix_discarded_block, fix_block_spans
  17. from mineru.utils.span_pre_proc import remove_outside_spans, remove_overlaps_low_confidence_spans, \
  18. remove_overlaps_min_spans, txt_spans_extract
  19. from mineru.version import __version__
  20. from mineru.utils.hash_utils import str_md5
  21. def page_model_info_to_page_info(page_model_info, image_dict, page, image_writer, page_index, ocr_enable=False, formula_enabled=True):
  22. scale = image_dict["scale"]
  23. page_pil_img = image_dict["img_pil"]
  24. page_img_md5 = str_md5(image_dict["img_base64"])
  25. page_w, page_h = map(int, page.get_size())
  26. magic_model = MagicModel(page_model_info, scale)
  27. """从magic_model对象中获取后面会用到的区块信息"""
  28. discarded_blocks = magic_model.get_discarded()
  29. text_blocks = magic_model.get_text_blocks()
  30. title_blocks = magic_model.get_title_blocks()
  31. inline_equations, interline_equations, interline_equation_blocks = magic_model.get_equations()
  32. img_groups = magic_model.get_imgs()
  33. table_groups = magic_model.get_tables()
  34. """对image和table的区块分组"""
  35. img_body_blocks, img_caption_blocks, img_footnote_blocks, maybe_text_image_blocks = process_groups(
  36. img_groups, 'image_body', 'image_caption_list', 'image_footnote_list'
  37. )
  38. table_body_blocks, table_caption_blocks, table_footnote_blocks, _ = process_groups(
  39. table_groups, 'table_body', 'table_caption_list', 'table_footnote_list'
  40. )
  41. """获取所有的spans信息"""
  42. spans = magic_model.get_all_spans()
  43. """某些图可能是文本块,通过简单的规则判断一下"""
  44. if len(maybe_text_image_blocks) > 0:
  45. for block in maybe_text_image_blocks:
  46. span_in_block_list = []
  47. for span in spans:
  48. if span['type'] == 'text' and calculate_overlap_area_in_bbox1_area_ratio(span['bbox'], block['bbox']) > 0.7:
  49. span_in_block_list.append(span)
  50. if len(span_in_block_list) > 0:
  51. # span_in_block_list中所有bbox的面积之和
  52. spans_area = sum((span['bbox'][2] - span['bbox'][0]) * (span['bbox'][3] - span['bbox'][1]) for span in span_in_block_list)
  53. # 求ocr_res_area和res的面积的比值
  54. block_area = (block['bbox'][2] - block['bbox'][0]) * (block['bbox'][3] - block['bbox'][1])
  55. if block_area > 0:
  56. ratio = spans_area / block_area
  57. if ratio > 0.25 and ocr_enable:
  58. # 移除block的group_id
  59. block.pop('group_id', None)
  60. # 符合文本图的条件就把块加入到文本块列表中
  61. text_blocks.append(block)
  62. else:
  63. # 如果不符合文本图的条件,就把块加回到图片块列表中
  64. img_body_blocks.append(block)
  65. else:
  66. img_body_blocks.append(block)
  67. """将所有区块的bbox整理到一起"""
  68. if formula_enabled:
  69. interline_equation_blocks = []
  70. if len(interline_equation_blocks) > 0:
  71. for block in interline_equation_blocks:
  72. spans.append({
  73. "type": ContentType.INTERLINE_EQUATION,
  74. 'score': block['score'],
  75. "bbox": block['bbox'],
  76. })
  77. all_bboxes, all_discarded_blocks, footnote_blocks = prepare_block_bboxes(
  78. img_body_blocks, img_caption_blocks, img_footnote_blocks,
  79. table_body_blocks, table_caption_blocks, table_footnote_blocks,
  80. discarded_blocks,
  81. text_blocks,
  82. title_blocks,
  83. interline_equation_blocks,
  84. page_w,
  85. page_h,
  86. )
  87. else:
  88. all_bboxes, all_discarded_blocks, footnote_blocks = prepare_block_bboxes(
  89. img_body_blocks, img_caption_blocks, img_footnote_blocks,
  90. table_body_blocks, table_caption_blocks, table_footnote_blocks,
  91. discarded_blocks,
  92. text_blocks,
  93. title_blocks,
  94. interline_equations,
  95. page_w,
  96. page_h,
  97. )
  98. """在删除重复span之前,应该通过image_body和table_body的block过滤一下image和table的span"""
  99. """顺便删除大水印并保留abandon的span"""
  100. spans = remove_outside_spans(spans, all_bboxes, all_discarded_blocks)
  101. """删除重叠spans中置信度较低的那些"""
  102. spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
  103. """删除重叠spans中较小的那些"""
  104. spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)
  105. """根据parse_mode,构造spans,主要是文本类的字符填充"""
  106. if ocr_enable:
  107. pass
  108. else:
  109. """使用新版本的混合ocr方案."""
  110. spans = txt_spans_extract(page, spans, page_pil_img, scale, all_bboxes, all_discarded_blocks)
  111. """先处理不需要排版的discarded_blocks"""
  112. discarded_block_with_spans, spans = fill_spans_in_blocks(
  113. all_discarded_blocks, spans, 0.4
  114. )
  115. fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)
  116. """如果当前页面没有有效的bbox则跳过"""
  117. if len(all_bboxes) == 0:
  118. return None
  119. """对image/table/interline_equation截图"""
  120. for span in spans:
  121. if span['type'] in [ContentType.IMAGE, ContentType.TABLE, ContentType.INTERLINE_EQUATION]:
  122. span = cut_image_and_table(
  123. span, page_pil_img, page_img_md5, page_index, image_writer, scale=scale
  124. )
  125. """span填充进block"""
  126. block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.5)
  127. """对block进行fix操作"""
  128. fix_blocks = fix_block_spans(block_with_spans)
  129. """同一行被断开的titile合并"""
  130. # merge_title_blocks(fix_blocks)
  131. """对block进行排序"""
  132. sorted_blocks = sort_blocks_by_bbox(fix_blocks, page_w, page_h, footnote_blocks)
  133. """构造page_info"""
  134. page_info = make_page_info_dict(sorted_blocks, page_index, page_w, page_h, fix_discarded_blocks)
  135. return page_info
def result_to_middle_json(model_list, images_list, pdf_doc, image_writer, lang=None, ocr_enable=False, formula_enabled=True):
    """Build the full middle-json structure from per-page model results.

    Converts each page via ``page_model_info_to_page_info``, runs a deferred
    batch OCR pass over cropped span images, splits paragraphs, optionally
    applies LLM-aided title fixing, then closes the document and frees memory.

    Args:
        model_list: per-page raw model results, aligned with ``images_list``.
        images_list: per-page image dicts ('scale', 'img_pil', 'img_base64').
        pdf_doc: document object; indexed per page and closed before returning.
        image_writer: writer passed through for image/table crops.
        lang: OCR language hint passed to the recognition model.
        ocr_enable: whether the OCR pipeline is active.
        formula_enabled: whether formula detection results should be used.

    Returns:
        The middle-json dict with 'pdf_info', '_backend' and '_version_name'.
    """
    middle_json = {"pdf_info": [], "_backend":"pipeline", "_version_name": __version__}
    for page_index, page_model_info in enumerate(model_list):
        page = pdf_doc[page_index]
        image_dict = images_list[page_index]
        page_info = page_model_info_to_page_info(
            page_model_info, image_dict, page, image_writer, page_index, ocr_enable=ocr_enable, formula_enabled=formula_enabled
        )
        # Pages with no valid bboxes come back as None — emit an empty page
        # entry so page indices stay aligned with the document.
        if page_info is None:
            page_w, page_h = map(int, page.get_size())
            page_info = make_page_info_dict([], page_index, page_w, page_h, [])
        middle_json["pdf_info"].append(page_info)
    """后置ocr处理"""
    # Deferred OCR pass: collect every span that still carries a cropped
    # image ('np_img') across all pages, then recognize them in one batch.
    need_ocr_list = []
    img_crop_list = []
    text_block_list = []
    for page_info in middle_json["pdf_info"]:
        for block in page_info['preproc_blocks']:
            if block['type'] in ['table', 'image']:
                # Captions/footnotes inside image/table groups are text too.
                for sub_block in block['blocks']:
                    if sub_block['type'] in ['image_caption', 'image_footnote', 'table_caption', 'table_footnote']:
                        text_block_list.append(sub_block)
            elif block['type'] in ['text', 'title']:
                text_block_list.append(block)
        for block in page_info['discarded_blocks']:
            text_block_list.append(block)
    for block in text_block_list:
        for line in block['lines']:
            for span in line['spans']:
                if 'np_img' in span:
                    need_ocr_list.append(span)
                    img_crop_list.append(span['np_img'])
                    # Crop is consumed here; drop it so it isn't serialized.
                    span.pop('np_img')
    if len(img_crop_list) > 0:
        atom_model_manager = AtomModelSingleton()
        ocr_model = atom_model_manager.get_atom_model(
            atom_model_name='ocr',
            ocr_show_log=False,
            det_db_box_thresh=0.3,
            lang=lang
        )
        # Recognition only (det=False): one result per cropped image.
        ocr_res_list = ocr_model.ocr(img_crop_list, det=False, tqdm_enable=True)[0]
        assert len(ocr_res_list) == len(
            need_ocr_list), f'ocr_res_list: {len(ocr_res_list)}, need_ocr_list: {len(need_ocr_list)}'
        for index, span in enumerate(need_ocr_list):
            ocr_text, ocr_score = ocr_res_list[index]
            # Keep the text only when recognition confidence is high enough.
            if ocr_score > OcrConfidence.min_confidence:
                span['content'] = ocr_text
                span['score'] = float(f"{ocr_score:.3f}")
            else:
                span['content'] = ''
                span['score'] = 0.0
    """分段"""
    # Paragraph segmentation over all pages (mutates page_info in place).
    para_split(middle_json["pdf_info"])
    """llm优化"""
    # Optional LLM-aided post-processing, driven by configuration.
    llm_aided_config = get_llm_aided_config()
    if llm_aided_config is not None:
        """标题优化"""
        # Title refinement step, only when explicitly enabled in config.
        title_aided_config = llm_aided_config.get('title_aided', None)
        if title_aided_config is not None:
            if title_aided_config.get('enable', False):
                llm_aided_title_start_time = time.time()
                llm_aided_title(middle_json["pdf_info"], title_aided_config)
                logger.info(f'llm aided title time: {round(time.time() - llm_aided_title_start_time, 2)}')
    """清理内存"""
    # Release the document and device memory before returning.
    pdf_doc.close()
    clean_memory(get_device())
    return middle_json
  204. def make_page_info_dict(blocks, page_id, page_w, page_h, discarded_blocks):
  205. return_dict = {
  206. 'preproc_blocks': blocks,
  207. 'page_idx': page_id,
  208. 'page_size': [page_w, page_h],
  209. 'discarded_blocks': discarded_blocks,
  210. }
  211. return return_dict