import copy
import os
import statistics
import time
from typing import List

import torch
from loguru import logger

from magic_pdf.config.drop_reason import DropReason
from magic_pdf.config.enums import SupportedPdfParseMethod
from magic_pdf.config.ocr_content_type import BlockType, ContentType
from magic_pdf.data.dataset import Dataset, PageableData
from magic_pdf.libs.boxbase import calculate_overlap_area_in_bbox1_area_ratio
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.commons import fitz, get_delta_time
from magic_pdf.libs.config_reader import get_local_layoutreader_model_dir
from magic_pdf.libs.convert_utils import dict_to_list
from magic_pdf.libs.hash_utils import compute_md5
from magic_pdf.libs.local_math import float_equal
from magic_pdf.model.magic_model import MagicModel
from magic_pdf.para.para_split_v3 import para_split
from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
from magic_pdf.pre_proc.construct_page_dict import \
    ocr_construct_page_component_v2
from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
from magic_pdf.pre_proc.equations_replace import (
    combine_chars_to_pymudict, remove_chars_in_text_blocks,
    replace_equations_in_textblock)
from magic_pdf.pre_proc.ocr_detect_all_bboxes import \
    ocr_prepare_bboxes_for_layout_split_v2
from magic_pdf.pre_proc.ocr_dict_merge import (fill_spans_in_blocks,
                                               fix_block_spans_v2,
                                               fix_discarded_block)
from magic_pdf.pre_proc.ocr_span_list_modify import (
    get_qa_need_list_v2, remove_overlaps_low_confidence_spans,
    remove_overlaps_min_spans)
from magic_pdf.pre_proc.resolve_bbox_conflict import \
    check_useful_block_horizontal_overlap


def remove_horizontal_overlap_block_which_smaller(all_bboxes):
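    """Check all_bboxes for horizontally overlapping useful blocks and, if an
    overlap is found, remove the smaller block from all_bboxes in place.

    Returns (is_useful_block_horz_overlap, all_bboxes).
    """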
    useful_blocks = []
    for bbox in all_bboxes:
        useful_blocks.append({'bbox': bbox[:4]})
    is_useful_block_horz_overlap, smaller_bbox, bigger_bbox = (
        check_useful_block_horizontal_overlap(useful_blocks)
    )
    if is_useful_block_horz_overlap:
        logger.warning(
            f'skip this page, reason: {DropReason.USEFUL_BLOCK_HOR_OVERLAP}, smaller bbox is {smaller_bbox}, bigger bbox is {bigger_bbox}'
        )  # noqa: E501
        for bbox in all_bboxes.copy():
            if smaller_bbox == bbox[:4]:
                all_bboxes.remove(bbox)
    return is_useful_block_horz_overlap, all_bboxes


def __replace_STX_ETX(text_str: str):
    """Replace \u0002 and \u0003, as these characters become garbled when extracted using pymupdf. In fact, they were originally quotation marks.

    Drawback: This issue is only observed in English text; it has not been found in Chinese text so far.

    Args:
        text_str (str): raw text

    Returns:
        _type_: replaced text
    """  # noqa: E501
    if text_str:
        s = text_str.replace('\u0002', "'")
        s = s.replace('\u0003', "'")
        return s
    return text_str


def txt_spans_extract(pdf_page, inline_equations, interline_equations):
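    """Extract text spans from a pymupdf page in TXT mode.

    Characters are merged into spans via combine_chars_to_pymudict, inline and
    interline equations are substituted back into the text blocks, citation
    markers are stripped, and zero-width or zero-height spans are skipped.
    Each returned span carries its bbox, its text content, ContentType.Text
    and a score of 1.0.
    """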
    text_raw_blocks = pdf_page.get_text('dict', flags=fitz.TEXTFLAGS_TEXT)['blocks']
    char_level_text_blocks = pdf_page.get_text('rawdict', flags=fitz.TEXTFLAGS_TEXT)[
        'blocks'
    ]
    text_blocks = combine_chars_to_pymudict(text_raw_blocks, char_level_text_blocks)
    text_blocks = replace_equations_in_textblock(
        text_blocks, inline_equations, interline_equations
    )
    text_blocks = remove_citation_marker(text_blocks)
    text_blocks = remove_chars_in_text_blocks(text_blocks)
    spans = []
    for v in text_blocks:
        for line in v['lines']:
            for span in line['spans']:
                bbox = span['bbox']
                if float_equal(bbox[0], bbox[2]) or float_equal(bbox[1], bbox[3]):
                    continue
                if span.get('type') not in (
                    ContentType.InlineEquation,
                    ContentType.InterlineEquation,
                ):
                    spans.append(
                        {
                            'bbox': list(span['bbox']),
                            'content': __replace_STX_ETX(span['text']),
                            'type': ContentType.Text,
                            'score': 1.0,
                        }
                    )
    return spans


def replace_text_span(pymu_spans, ocr_spans):
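    """Replace the text-type OCR spans with the spans extracted by pymupdf,
    keeping all non-text OCR spans unchanged."""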
    return list(filter(lambda x: x['type'] != ContentType.Text, ocr_spans)) + pymu_spans


def model_init(model_name: str):
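    """Load the reading-order model for the given model_name.

    Only 'layoutreader' is supported: the model is loaded from the local
    model directory when it exists, otherwise from the 'hantian/layoutreader'
    checkpoint on Hugging Face, and is cast to bfloat16 when the device
    supports it.
    """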
    from transformers import LayoutLMv3ForTokenClassification

    if torch.cuda.is_available():
        device = torch.device('cuda')
        if torch.cuda.is_bf16_supported():
            supports_bfloat16 = True
        else:
            supports_bfloat16 = False
    else:
        device = torch.device('cpu')
        supports_bfloat16 = False

    if model_name == 'layoutreader':
        # check whether the modelscope cache directory exists
        layoutreader_model_dir = get_local_layoutreader_model_dir()
        if os.path.exists(layoutreader_model_dir):
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                layoutreader_model_dir
            )
        else:
            logger.warning(
                'local layoutreader model not exists, use online model from huggingface'
            )
            model = LayoutLMv3ForTokenClassification.from_pretrained(
                'hantian/layoutreader'
            )
        # check whether the device supports bfloat16
        if supports_bfloat16:
            model.bfloat16()
        model.to(device).eval()
    else:
        logger.error('model name not allow')
        exit(1)
    return model


class ModelSingleton:
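    """Process-wide singleton that lazily creates and caches models by name."""
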
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(self, model_name: str):
        if model_name not in self._models:
            self._models[model_name] = model_init(model_name=model_name)
        return self._models[model_name]


def do_predict(boxes: List[List[int]], model) -> List[int]:
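    """Run the layoutreader model on the already scaled line boxes and return
    the predicted reading-order indices (the 'reading_oreder' package name is
    spelled as imported here)."""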
    from magic_pdf.model.sub_modules.reading_oreder.layoutreader.helpers import (
        boxes2inputs, parse_logits, prepare_inputs)

    inputs = boxes2inputs(boxes)
    inputs = prepare_inputs(inputs, model)
    logits = model(**inputs).logits.cpu().squeeze(0)
    return parse_logits(logits, len(boxes))


def cal_block_index(fix_blocks, sorted_bboxes):
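    """Assign a reading-order 'index' to every block.

    When sorted_bboxes is available, each line gets the position of its bbox
    in the layoutreader ordering and a block's index is the median of its
    line indices; otherwise the blocks are ordered with recursive XY-cut. In
    both cases the virtual lines injected into image/table body blocks are
    moved to 'virtual_lines' and the original 'real_lines' are restored, and
    a global line index is finally generated over the sorted blocks.
    """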
    if sorted_bboxes is not None:
        # sort with layoutreader
        for block in fix_blocks:
            line_index_list = []
            if len(block['lines']) == 0:
                block['index'] = sorted_bboxes.index(block['bbox'])
            else:
                for line in block['lines']:
                    line['index'] = sorted_bboxes.index(line['bbox'])
                    line_index_list.append(line['index'])
                median_value = statistics.median(line_index_list)
                block['index'] = median_value

            # drop the virtual lines from image/table body blocks and restore real_lines
            if block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
                block['virtual_lines'] = copy.deepcopy(block['lines'])
                block['lines'] = copy.deepcopy(block['real_lines'])
                del block['real_lines']
    else:
        # sort with xycut
        block_bboxes = []
        for block in fix_blocks:
            block_bboxes.append(block['bbox'])

            # drop the virtual lines from image/table body blocks and restore real_lines
            if block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
                block['virtual_lines'] = copy.deepcopy(block['lines'])
                block['lines'] = copy.deepcopy(block['real_lines'])
                del block['real_lines']

        import numpy as np

        from magic_pdf.model.sub_modules.reading_oreder.layoutreader.xycut import \
            recursive_xy_cut

        random_boxes = np.array(block_bboxes)
        np.random.shuffle(random_boxes)
        res = []
        recursive_xy_cut(np.asarray(random_boxes).astype(int), np.arange(len(block_bboxes)), res)
        assert len(res) == len(block_bboxes)
        sorted_boxes = random_boxes[np.array(res)].tolist()

        for i, block in enumerate(fix_blocks):
            block['index'] = sorted_boxes.index(block['bbox'])

    # generate the line index
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])
    line_index = 1
    for block in sorted_blocks:
        for line in block['lines']:
            line['index'] = line_index
            line_index += 1

    return fix_blocks


def insert_lines_into_block(block_bbox, line_height, page_w, page_h):
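    """Split a block bbox into evenly spaced pseudo-line bboxes.

    Blocks taller than three body-text lines are sliced according to their
    width relative to the page (finer for likely two- or three-column
    layouts, coarser for wide figures); tall narrow blocks and blocks shorter
    than three lines are returned as a single bbox.
    """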
    # block_bbox is a tuple (x0, y0, x1, y1) holding the two opposite corners of the block
    x0, y0, x1, y1 = block_bbox

    block_height = y1 - y0
    block_weight = x1 - x0

    # if the block is less than n lines of body text tall, return the block bbox directly
    if line_height * 3 < block_height:
        if (
            block_height > page_h * 0.25 and page_w * 0.5 > block_weight > page_w * 0.25
        ):  # probably a two-column layout, slice it finer
            lines = int(block_height / line_height) + 1
        else:
            # if the block is wider than 0.4 page width, split it into 3 lines (complex layout, don't slice figures too finely)
            if block_weight > page_w * 0.4:
                line_height = (y1 - y0) / 3
                lines = 3
            elif block_weight > page_w * 0.25:  # (probably a three-column layout, also slice finer)
                lines = int(block_height / line_height) + 1
            else:  # check the aspect ratio
                if block_height / block_weight > 1.2:  # tall and narrow: don't split
                    return [[x0, y0, x1, y1]]
                else:  # otherwise split into two lines
                    line_height = (y1 - y0) / 2
                    lines = 2

        # determine the y position to start drawing lines from
        current_y = y0

        # store the line positions [(x0, y), ...]
        lines_positions = []

        for i in range(lines):
            lines_positions.append([x0, current_y, x1, current_y + line_height])
            current_y += line_height
        return lines_positions
    else:
        return [[x0, y0, x1, y1]]


def sort_lines_by_model(fix_blocks, page_w, page_h, line_height):
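    """Collect every line bbox on the page and sort them with layoutreader.

    Text-like blocks contribute their real lines (or pseudo-lines when they
    have none); image and table bodies are replaced by pseudo-lines while
    their real lines are stashed in 'real_lines'. Returns the line bboxes in
    reading order, or None when the page has more than 200 lines so the
    caller can fall back to XY-cut sorting.
    """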
    page_line_list = []
    for block in fix_blocks:
        if block['type'] in [
            BlockType.Text, BlockType.Title, BlockType.InterlineEquation,
            BlockType.ImageCaption, BlockType.ImageFootnote,
            BlockType.TableCaption, BlockType.TableFootnote
        ]:
            if len(block['lines']) == 0:
                bbox = block['bbox']
                lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
                for line in lines:
                    block['lines'].append({'bbox': line, 'spans': []})
                page_line_list.extend(lines)
            else:
                for line in block['lines']:
                    bbox = line['bbox']
                    page_line_list.append(bbox)
        elif block['type'] in [BlockType.ImageBody, BlockType.TableBody]:
            bbox = block['bbox']
            block['real_lines'] = copy.deepcopy(block['lines'])
            lines = insert_lines_into_block(bbox, line_height, page_w, page_h)
            block['lines'] = []
            for line in lines:
                block['lines'].append({'bbox': line, 'spans': []})
            page_line_list.extend(lines)

    if len(page_line_list) > 200:  # layoutreader supports at most 512 lines
        return None

    # sort with layoutreader
    x_scale = 1000.0 / page_w
    y_scale = 1000.0 / page_h
    boxes = []
    # logger.info(f"Scale: {x_scale}, {y_scale}, Boxes len: {len(page_line_list)}")
    for left, top, right, bottom in page_line_list:
        if left < 0:
            logger.warning(
                f'left < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            left = 0
        if right > page_w:
            logger.warning(
                f'right > page_w, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            right = page_w
        if top < 0:
            logger.warning(
                f'top < 0, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            top = 0
        if bottom > page_h:
            logger.warning(
                f'bottom > page_h, left: {left}, right: {right}, top: {top}, bottom: {bottom}, page_w: {page_w}, page_h: {page_h}'
            )  # noqa: E501
            bottom = page_h

        left = round(left * x_scale)
        top = round(top * y_scale)
        right = round(right * x_scale)
        bottom = round(bottom * y_scale)
        assert (
            1000 >= right >= left >= 0 and 1000 >= bottom >= top >= 0
        ), f'Invalid box. right: {right}, left: {left}, bottom: {bottom}, top: {top}'  # noqa: E126, E121
        boxes.append([left, top, right, bottom])

    model_manager = ModelSingleton()
    model = model_manager.get_model('layoutreader')
    with torch.no_grad():
        orders = do_predict(boxes, model)
    sorted_bboxes = [page_line_list[i] for i in orders]

    return sorted_bboxes


def get_line_height(blocks):
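    """Return the median line height over text-like blocks, or 10 when the
    page has no such lines."""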
    page_line_height_list = []
    for block in blocks:
        if block['type'] in [
            BlockType.Text, BlockType.Title,
            BlockType.ImageCaption, BlockType.ImageFootnote,
            BlockType.TableCaption, BlockType.TableFootnote
        ]:
            for line in block['lines']:
                bbox = line['bbox']
                page_line_height_list.append(int(bbox[3] - bbox[1]))
    if len(page_line_height_list) > 0:
        return statistics.median(page_line_height_list)
    else:
        return 10


def process_groups(groups, body_key, caption_key, footnote_key):
    body_blocks = []
    caption_blocks = []
    footnote_blocks = []
    for i, group in enumerate(groups):
        group[body_key]['group_id'] = i
        body_blocks.append(group[body_key])
        for caption_block in group[caption_key]:
            caption_block['group_id'] = i
            caption_blocks.append(caption_block)
        for footnote_block in group[footnote_key]:
            footnote_block['group_id'] = i
            footnote_blocks.append(footnote_block)
    return body_blocks, caption_blocks, footnote_blocks


def process_block_list(blocks, body_type, block_type):
    indices = [block['index'] for block in blocks]
    median_index = statistics.median(indices)
    body_bbox = next((block['bbox'] for block in blocks if block.get('type') == body_type), [])
    return {
        'type': block_type,
        'bbox': body_bbox,
        'blocks': blocks,
        'index': median_index,
    }


def revert_group_blocks(blocks):
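    """Regroup image/table body, caption and footnote blocks by group_id into
    single Image/Table group blocks; all other blocks are kept as-is."""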
    image_groups = {}
    table_groups = {}
    new_blocks = []
    for block in blocks:
        if block['type'] in [BlockType.ImageBody, BlockType.ImageCaption, BlockType.ImageFootnote]:
            group_id = block['group_id']
            if group_id not in image_groups:
                image_groups[group_id] = []
            image_groups[group_id].append(block)
        elif block['type'] in [BlockType.TableBody, BlockType.TableCaption, BlockType.TableFootnote]:
            group_id = block['group_id']
            if group_id not in table_groups:
                table_groups[group_id] = []
            table_groups[group_id].append(block)
        else:
            new_blocks.append(block)

    for group_id, blocks in image_groups.items():
        new_blocks.append(process_block_list(blocks, BlockType.ImageBody, BlockType.Image))

    for group_id, blocks in table_groups.items():
        new_blocks.append(process_block_list(blocks, BlockType.TableBody, BlockType.Table))

    return new_blocks


def remove_outside_spans(spans, all_bboxes, all_discarded_blocks):
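    """Keep only the spans that sufficiently overlap a block of a matching type.

    Spans overlapping a discarded block by more than 0.4 are always kept;
    image and table spans must overlap an image/table body block by more than
    0.5, and all other spans must overlap some non-image, non-table block by
    more than 0.5.
    """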
    def get_block_bboxes(blocks, block_type_list):
        return [block[0:4] for block in blocks if block[7] in block_type_list]

    image_bboxes = get_block_bboxes(all_bboxes, [BlockType.ImageBody])
    table_bboxes = get_block_bboxes(all_bboxes, [BlockType.TableBody])
    other_block_type = []
    for block_type in BlockType.__dict__.values():
        if not isinstance(block_type, str):
            continue
        if block_type not in [BlockType.ImageBody, BlockType.TableBody]:
            other_block_type.append(block_type)
    other_block_bboxes = get_block_bboxes(all_bboxes, other_block_type)
    discarded_block_bboxes = get_block_bboxes(all_discarded_blocks, [BlockType.Discarded])

    new_spans = []

    for span in spans:
        span_bbox = span['bbox']
        span_type = span['type']

        if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.4 for block_bbox in
               discarded_block_bboxes):
            new_spans.append(span)
            continue

        if span_type == ContentType.Image:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   image_bboxes):
                new_spans.append(span)
        elif span_type == ContentType.Table:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   table_bboxes):
                new_spans.append(span)
        else:
            if any(calculate_overlap_area_in_bbox1_area_ratio(span_bbox, block_bbox) > 0.5 for block_bbox in
                   other_block_bboxes):
                new_spans.append(span)

    return new_spans


def parse_page_core(
    page_doc: PageableData, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode
):
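    """Parse a single page: gather blocks and spans from magic_model, clean
    and deduplicate the spans, fill them into blocks, sort the blocks into
    reading order and build the page_info dict for this page."""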
    need_drop = False
    drop_reason = []

    """Get the block information we will need later from the magic_model object."""
    # img_blocks = magic_model.get_imgs(page_id)
    # table_blocks = magic_model.get_tables(page_id)

    img_groups = magic_model.get_imgs_v2(page_id)
    table_groups = magic_model.get_tables_v2(page_id)

    img_body_blocks, img_caption_blocks, img_footnote_blocks = process_groups(
        img_groups, 'image_body', 'image_caption_list', 'image_footnote_list'
    )

    table_body_blocks, table_caption_blocks, table_footnote_blocks = process_groups(
        table_groups, 'table_body', 'table_caption_list', 'table_footnote_list'
    )

    discarded_blocks = magic_model.get_discarded(page_id)
    text_blocks = magic_model.get_text_blocks(page_id)
    title_blocks = magic_model.get_title_blocks(page_id)
    inline_equations, interline_equations, interline_equation_blocks = (
        magic_model.get_equations(page_id)
    )

    page_w, page_h = magic_model.get_page_size(page_id)

    """Gather the bboxes of all blocks together."""
    # interline_equation_blocks is not accurate enough; switch to interline_equations below
    interline_equation_blocks = []
    if len(interline_equation_blocks) > 0:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_body_blocks, img_caption_blocks, img_footnote_blocks,
            table_body_blocks, table_caption_blocks, table_footnote_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equation_blocks,
            page_w,
            page_h,
        )
    else:
        all_bboxes, all_discarded_blocks = ocr_prepare_bboxes_for_layout_split_v2(
            img_body_blocks, img_caption_blocks, img_footnote_blocks,
            table_body_blocks, table_caption_blocks, table_footnote_blocks,
            discarded_blocks,
            text_blocks,
            title_blocks,
            interline_equations,
            page_w,
            page_h,
        )

    spans = magic_model.get_all_spans(page_id)

    """Build spans according to parse_mode."""
    if parse_mode == SupportedPdfParseMethod.TXT:
        """Replace the text-type spans from OCR with pymu spans."""
        pymu_spans = txt_spans_extract(page_doc, inline_equations, interline_equations)
        spans = replace_text_span(pymu_spans, spans)
    elif parse_mode == SupportedPdfParseMethod.OCR:
        pass
    else:
        raise Exception('parse_mode must be txt or ocr')

    """Before removing duplicate spans, filter the image and table spans by the image_body and table_body blocks."""
    """This also removes large watermarks while keeping the spans of abandoned blocks."""
    spans = remove_outside_spans(spans, all_bboxes, all_discarded_blocks)

    """Among overlapping spans, remove the ones with lower confidence."""
    spans, dropped_spans_by_confidence = remove_overlaps_low_confidence_spans(spans)
    """Among overlapping spans, remove the smaller ones."""
    spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)

    """Crop images and tables."""
    spans = ocr_cut_image_and_table(
        spans, page_doc, page_id, pdf_bytes_md5, imageWriter
    )

    """First handle the discarded_blocks, which do not take part in layout."""
    discarded_block_with_spans, spans = fill_spans_in_blocks(
        all_discarded_blocks, spans, 0.4
    )
    fix_discarded_blocks = fix_discarded_block(discarded_block_with_spans)

    """Skip this page if it has no bboxes."""
    if len(all_bboxes) == 0:
        logger.warning(f'skip this page, not found useful bbox, page_id: {page_id}')
        return ocr_construct_page_component_v2(
            [],
            [],
            page_id,
            page_w,
            page_h,
            [],
            [],
            [],
            interline_equations,
            fix_discarded_blocks,
            need_drop,
            drop_reason,
        )

    """Fill the spans into the blocks."""
    block_with_spans, spans = fill_spans_in_blocks(all_bboxes, spans, 0.5)

    """Fix the blocks."""
    fix_blocks = fix_block_spans_v2(block_with_spans)

    """Collect all lines and compute the height of body-text lines."""
    line_height = get_line_height(fix_blocks)

    """Collect all lines and sort them."""
    sorted_bboxes = sort_lines_by_model(fix_blocks, page_w, page_h, line_height)

    """Compute the ordering of blocks from the median of their line indices."""
    fix_blocks = cal_block_index(fix_blocks, sorted_bboxes)

    """Revert image and table blocks back to group form for the rest of the pipeline."""
    fix_blocks = revert_group_blocks(fix_blocks)

    """Re-sort the blocks."""
    sorted_blocks = sorted(fix_blocks, key=lambda b: b['index'])

    """Get the lists that QA needs exposed."""
    images, tables, interline_equations = get_qa_need_list_v2(sorted_blocks)

    """Build pdf_info_dict."""
    page_info = ocr_construct_page_component_v2(
        sorted_blocks,
        [],
        page_id,
        page_w,
        page_h,
        [],
        images,
        tables,
        interline_equations,
        fix_discarded_blocks,
        need_drop,
        drop_reason,
    )
    return page_info


def pdf_parse_union(
    dataset: Dataset,
    model_list,
    imageWriter,
    parse_mode,
    start_page_id=0,
    end_page_id=None,
    debug_mode=False,
):
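    """Parse the whole dataset page by page with parse_page_core and return a
    dict with a 'pdf_info' list; pages outside [start_page_id, end_page_id]
    are emitted as empty 'skip page' entries."""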
    pdf_bytes_md5 = compute_md5(dataset.data_bits())

    """Initialize an empty pdf_info_dict."""
    pdf_info_dict = {}

    """Initialize magic_model from model_list and the docs object."""
    magic_model = MagicModel(model_list, dataset)

    """Parse the pdf over the requested page range."""
    # end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
    end_page_id = (
        end_page_id
        if end_page_id is not None and end_page_id >= 0
        else len(dataset) - 1
    )
    if end_page_id > len(dataset) - 1:
        logger.warning('end_page_id is out of range, use pdf_docs length')
        end_page_id = len(dataset) - 1

    """Initialize the start time."""
    start_time = time.time()

    for page_id, page in enumerate(dataset):
        """In debug mode, log the time spent parsing each page."""
        if debug_mode:
            time_now = time.time()
            logger.info(
                f'page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}'
            )
            start_time = time_now

        """Parse each page of the pdf."""
        if start_page_id <= page_id <= end_page_id:
            page_info = parse_page_core(
                page, magic_model, page_id, pdf_bytes_md5, imageWriter, parse_mode
            )
        else:
            page_info = page.get_page_info()
            page_w = page_info.w
            page_h = page_info.h
            page_info = ocr_construct_page_component_v2(
                [], [], page_id, page_w, page_h, [], [], [], [], [], True, 'skip page'
            )
        pdf_info_dict[f'page_{page_id}'] = page_info

    """Split paragraphs."""
    para_split(pdf_info_dict, debug_mode=debug_mode)

    """Convert the dict to a list."""
    pdf_info_list = dict_to_list(pdf_info_dict)
    new_pdf_info_dict = {
        'pdf_info': pdf_info_list,
    }

    clean_memory()

    return new_pdf_info_dict


if __name__ == '__main__':
    pass
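    # Minimal usage sketch (hypothetical; the dataset, model_list and
    # imageWriter must be built by the caller, e.g. from magic_pdf's Dataset
    # implementations and an image writer, which are not part of this module):
    #
    #   pdf_info = pdf_parse_union(dataset, model_list, imageWriter,
    #                              SupportedPdfParseMethod.OCR, debug_mode=True)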