# pdf_parse_for_train.py
  1. import time
  2. # from anyio import Path
  3. from magic_pdf.libs.commons import (
  4. fitz,
  5. get_delta_time,
  6. get_img_s3_client,
  7. get_docx_model_output,
  8. )
  9. import json
  10. import os
  11. from copy import deepcopy
  12. import math
  13. from loguru import logger
  14. from magic_pdf.layout.bbox_sort import (
  15. prepare_bboxes_for_layout_split,
  16. )
  17. from magic_pdf.layout.layout_sort import (
  18. LAYOUT_UNPROC,
  19. get_bboxes_layout,
  20. get_columns_cnt_of_layout,
  21. sort_text_block,
  22. )
  23. from magic_pdf.libs.drop_reason import DropReason
  24. from magic_pdf.libs.markdown_utils import escape_special_markdown_char
  25. from magic_pdf.libs.safe_filename import sanitize_filename
  26. from magic_pdf.libs.vis_utils import draw_bbox_on_page, draw_layout_bbox_on_page
  27. from magic_pdf.pre_proc.detect_images import parse_images
  28. from magic_pdf.pre_proc.detect_tables import parse_tables # 获取tables的bbox
  29. from magic_pdf.pre_proc.detect_equation import parse_equations # 获取equations的bbox
  30. from magic_pdf.pre_proc.detect_header import parse_headers # 获取headers的bbox
  31. from magic_pdf.pre_proc.detect_page_number import parse_pageNos # 获取pageNos的bbox
  32. from magic_pdf.pre_proc.detect_footnote import (
  33. parse_footnotes_by_model,
  34. parse_footnotes_by_rule,
  35. ) # 获取footnotes的bbox
  36. from magic_pdf.pre_proc.detect_footer_by_model import parse_footers # 获取footers的bbox
  37. from magic_pdf.post_proc.detect_para import (
  38. ParaProcessPipeline,
  39. TitleDetectionException,
  40. TitleLevelException,
  41. ParaSplitException,
  42. ParaMergeException,
  43. DenseSingleLineBlockException,
  44. )
  45. from magic_pdf.pre_proc.main_text_font import get_main_text_font
  46. from magic_pdf.pre_proc.remove_colored_strip_bbox import remove_colored_strip_textblock
  47. from magic_pdf.pre_proc.remove_footer_header import remove_headder_footer_one_page
  48. from magic_pdf.train_utils.extract_caption import extract_caption_bbox
  49. """
  50. from para.para_pipeline import ParaProcessPipeline
  51. from para.exceptions import (
  52. TitleDetectionException,
  53. TitleLevelException,
  54. ParaSplitException,
  55. ParaMergeException,
  56. DenseSingleLineBlockException,
  57. )
  58. """
  59. from magic_pdf.libs.commons import read_file, join_path
  60. from magic_pdf.libs.pdf_image_tools import save_images_by_bboxes
  61. from magic_pdf.post_proc.remove_footnote import (
  62. merge_footnote_blocks,
  63. remove_footnote_blocks,
  64. )
  65. from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
  66. from magic_pdf.pre_proc.equations_replace import (
  67. combine_chars_to_pymudict,
  68. remove_chars_in_text_blocks,
  69. replace_equations_in_textblock,
  70. )
  71. from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
  72. from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
  73. from magic_pdf.pre_proc.construct_page_dict import construct_page_component
  74. from magic_pdf.pre_proc.fix_image import (
  75. combine_images,
  76. fix_image_vertical,
  77. fix_seperated_image,
  78. include_img_title,
  79. )
  80. from magic_pdf.post_proc.pdf_post_filter import pdf_post_filter
  81. from magic_pdf.pre_proc.remove_rotate_bbox import (
  82. get_side_boundry,
  83. remove_rotate_side_textblock,
  84. remove_side_blank_block,
  85. )
  86. from magic_pdf.pre_proc.resolve_bbox_conflict import (
  87. check_text_block_horizontal_overlap,
  88. resolve_bbox_overlap_conflict,
  89. )
  90. from magic_pdf.pre_proc.fix_table import (
  91. fix_table_text_block,
  92. fix_tables,
  93. include_table_title,
  94. )
  95. from magic_pdf.pre_proc.solve_line_alien import solve_inline_too_large_interval
  96. denseSingleLineBlockException_msg = DenseSingleLineBlockException().message
  97. titleDetectionException_msg = TitleDetectionException().message
  98. titleLevelException_msg = TitleLevelException().message
  99. paraSplitException_msg = ParaSplitException().message
  100. paraMergeException_msg = ParaMergeException().message
  101. def parse_pdf_for_train(
  102. s3_pdf_path,
  103. s3_pdf_profile,
  104. pdf_model_output,
  105. save_path,
  106. book_name,
  107. image_s3_config=None,
  108. start_page_id=0,
  109. end_page_id=None,
  110. junk_img_bojids=[],
  111. debug_mode=False,
  112. ):
  113. pdf_bytes = read_file(s3_pdf_path, s3_pdf_profile)
  114. save_tmp_path = os.path.join(os.path.dirname(__file__), "../..", "tmp", "unittest")
  115. md_bookname_save_path = ""
  116. book_name = sanitize_filename(book_name)
  117. if debug_mode:
  118. save_path = join_path(save_tmp_path, "md")
  119. pdf_local_path = join_path(save_tmp_path, "download-pdfs", book_name)
  120. if not os.path.exists(os.path.dirname(pdf_local_path)):
  121. # 如果目录不存在,创建它
  122. os.makedirs(os.path.dirname(pdf_local_path))
  123. md_bookname_save_path = join_path(save_tmp_path, "md", book_name)
  124. if not os.path.exists(md_bookname_save_path):
  125. # 如果目录不存在,创建它
  126. os.makedirs(md_bookname_save_path)
  127. with open(pdf_local_path + ".pdf", "wb") as pdf_file:
  128. pdf_file.write(pdf_bytes)
  129. pdf_docs = fitz.open("pdf", pdf_bytes)
  130. pdf_info_dict = {}
  131. img_s3_client = get_img_s3_client(
  132. save_path, image_s3_config
  133. ) # 更改函数名和参数,避免歧义
  134. # img_s3_client = "img_s3_client" #不创建这个对象,直接用字符串占位
  135. start_time = time.time()
  136. """通过统计pdf全篇文字,识别正文字体"""
  137. main_text_font = get_main_text_font(pdf_docs)
  138. end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
  139. for page_id in range(start_page_id, end_page_id + 1):
  140. page = pdf_docs[page_id]
  141. page_width = page.rect.width
  142. page_height = page.rect.height
  143. if debug_mode:
  144. time_now = time.time()
  145. logger.info(
  146. f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}"
  147. )
  148. start_time = time_now
  149. """
  150. # 通过一个规则,过滤掉单页超过1500非junkimg的pdf
  151. # 对单页面非重复id的img数量做统计,如果当前页超过1500则直接return need_drop
  152. """
  153. page_imgs = page.get_images()
  154. img_counts = 0
  155. for img in page_imgs:
  156. img_bojid = img[0]
  157. if img_bojid in junk_img_bojids: # 判断这个图片在不在junklist中
  158. continue # 如果在junklist就不用管了,跳过
  159. else:
  160. recs = page.get_image_rects(img, transform=True)
  161. if recs: # 如果这张图在当前页面有展示
  162. img_counts += 1
  163. if (
  164. img_counts >= 1500
  165. ): # 如果去除了junkimg的影响,单页img仍然超过1500的话,就排除当前pdf
  166. logger.warning(
  167. f"page_id: {page_id}, img_counts: {img_counts}, drop this pdf: {book_name}, drop_reason: {DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}"
  168. )
  169. result = {
  170. "_need_drop": True,
  171. "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS,
  172. }
  173. if not debug_mode:
  174. return result
  175. """
  176. ==================================================================================================================================
  177. 首先获取基本的block数据,对pdf进行分解,获取图片、表格、公式、text的bbox
  178. """
  179. # 解析pdf原始文本block
  180. text_raw_blocks = page.get_text(
  181. "dict",
  182. flags=fitz.TEXTFLAGS_TEXT,
  183. )["blocks"]
  184. model_output_json = get_docx_model_output(
  185. pdf_model_output, page_id
  186. )
  187. # 解析图片
  188. image_bboxes = parse_images(page_id, page, model_output_json, junk_img_bojids)
  189. image_bboxes = fix_image_vertical(
  190. image_bboxes, text_raw_blocks
  191. ) # 修正图片的位置
  192. image_bboxes = fix_seperated_image(image_bboxes) # 合并有边重合的图片
  193. old_image_bboxes = deepcopy(image_bboxes)
  194. image_bboxes = include_img_title(
  195. text_raw_blocks, image_bboxes
  196. ) # 向图片上方和下方寻找title,使用规则进行匹配,暂时只支持英文规则
  197. """此时image_bboxes中可能出现这种情况,水平并列的2个图片,下方分别有各自的子标题,2个子标题下方又有大标题(形如Figxxx),会出现2个图片的bbox都包含了这个大标题,这种情况需要把图片合并"""
  198. image_bboxes = combine_images(image_bboxes) # 合并图片
  199. # 解析表格并对table_bboxes进行位置的微调,防止表格周围的文字被截断
  200. table_bboxes = parse_tables(page_id, page, model_output_json)
  201. table_bboxes = fix_tables(
  202. page, table_bboxes, include_table_title=False, scan_line_num=2
  203. ) # 修正
  204. table_bboxes = fix_table_text_block(
  205. text_raw_blocks, table_bboxes
  206. ) # 修正与text block的关系,某些table修正与pymupdf获取到的table内textblock没有完全包含,因此要进行一次修正。
  207. # debug_show_bbox(pdf_docs, page_id, table_bboxes, [], [b['bbox'] for b in text_raw_blocks], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
  208. old_table_bboxes = deepcopy(table_bboxes)
  209. table_bboxes = include_table_title(
  210. text_raw_blocks, table_bboxes
  211. ) # 向table上方和下方寻找title,使用规则进行匹配,暂时只支持英文规则
  212. # 解析公式
  213. equations_inline_bboxes, equations_interline_bboxes = parse_equations(
  214. page_id, page, model_output_json
  215. )
  216. # get image box and caption !
  217. image_bboxes_with_caption = extract_caption_bbox(image_bboxes, old_image_bboxes)
  218. # get table box and caption !
  219. table_bboxes_with_caption = extract_caption_bbox(table_bboxes, old_table_bboxes)
  220. """
  221. ==================================================================================================================================
  222. 进入预处理-1阶段
  223. -------------------
  224. # # 解析标题
  225. # title_bboxs = parse_titles(page_id, page, model_output_json)
  226. # # 评估Layout是否规整、简单
  227. # isSimpleLayout_flag, fullColumn_cnt, subColumn_cnt, curPage_loss = evaluate_pdf_layout(page_id, page, model_output_json)
  228. 接下来开始进行预处理过程
  229. """
  230. # title_bboxs = parse_titles(page_id, page, model_output_json)
  231. """去掉每页的页码、页眉、页脚"""
  232. page_no_bboxs = parse_pageNos(page_id, page, model_output_json)
  233. header_bboxs = parse_headers(page_id, page, model_output_json)
  234. footer_bboxs = parse_footers(page_id, page, model_output_json)
  235. (
  236. image_bboxes,
  237. table_bboxes,
  238. remain_text_blocks,
  239. removed_hdr_foot_txt_block,
  240. removed_hdr_foot_img_block,
  241. removed_hdr_foot_table,
  242. ) = remove_headder_footer_one_page(
  243. text_raw_blocks,
  244. image_bboxes,
  245. table_bboxes,
  246. header_bboxs,
  247. footer_bboxs,
  248. page_no_bboxs,
  249. page_width,
  250. page_height,
  251. )
  252. """去除页面上半部分长条色块内的文本块"""
  253. remain_text_blocks, removed_colored_narrow_strip_background_text_block = (
  254. remove_colored_strip_textblock(remain_text_blocks, page)
  255. )
  256. # debug_show_bbox(pdf_docs, page_id, footnote_bboxes_by_model, [b['bbox'] for b in remain_text_blocks], header_bboxs, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
  257. """去掉旋转的文字:水印、垂直排列的文字"""
  258. remain_text_blocks, removed_non_horz_text_block = remove_rotate_side_textblock(
  259. remain_text_blocks, page_width, page_height
  260. ) # 去掉水印,非水平文字
  261. remain_text_blocks, removed_empty_side_block = remove_side_blank_block(
  262. remain_text_blocks, page_width, page_height
  263. ) # 删除页面四周可能会留下的完全空白的textblock,这种block形成原因未知
  264. """出现在图片、表格上的文字块去掉,把层叠的图片单独分离出来,不参与layout的计算"""
  265. (
  266. image_bboxes,
  267. table_bboxes,
  268. equations_interline_bboxes,
  269. equations_inline_bboxes,
  270. remain_text_blocks,
  271. text_block_on_image_removed,
  272. images_overlap_backup,
  273. interline_eq_temp_text_block,
  274. ) = resolve_bbox_overlap_conflict(
  275. image_bboxes,
  276. table_bboxes,
  277. equations_interline_bboxes,
  278. equations_inline_bboxes,
  279. remain_text_blocks,
  280. )
  281. # """去掉footnote, 从文字和图片中"""
  282. # # 通过模型识别到的footnote
  283. # footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, md_bookname_save_path,
  284. # debug_mode=debug_mode)
  285. # # 通过规则识别到的footnote
  286. # footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id)
  287. """
  288. ==================================================================================================================================
  289. """
  290. if debug_mode: # debugmode截图到本地
  291. save_path = join_path(save_tmp_path, "md")
  292. # 把图、表、公式都进行截图,保存到存储上,返回图片路径作为内容
  293. image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info = (
  294. save_images_by_bboxes(
  295. book_name,
  296. page_id,
  297. page,
  298. save_path,
  299. image_bboxes,
  300. images_overlap_backup,
  301. table_bboxes,
  302. equations_inline_bboxes,
  303. equations_interline_bboxes,
  304. # 传入img_s3_client
  305. img_s3_client,
  306. )
  307. ) # 只要表格和图片的截图
  308. """"以下进入到公式替换环节 """
  309. char_level_text_blocks = page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)[
  310. "blocks"
  311. ]
  312. remain_text_blocks = combine_chars_to_pymudict(
  313. remain_text_blocks, char_level_text_blocks
  314. ) # 合并chars
  315. remain_text_blocks = replace_equations_in_textblock(
  316. remain_text_blocks, inline_eq_info, interline_eq_info
  317. )
  318. remain_text_blocks = remove_citation_marker(
  319. remain_text_blocks
  320. ) # 公式替换之后去角标,防止公式无法替换成功。但是这样也会带来个问题就是把角标当公式。各有优劣。
  321. remain_text_blocks = remove_chars_in_text_blocks(
  322. remain_text_blocks
  323. ) # 减少中间态数据体积
  324. # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in inline_eq_info], [b['bbox'] for b in interline_eq_info], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 3)
  325. """去掉footnote, 从文字和图片中(先去角标再去footnote试试)"""
  326. # 通过模型识别到的footnote
  327. footnote_bboxes_by_model = parse_footnotes_by_model(
  328. page_id,
  329. page,
  330. model_output_json,
  331. md_bookname_save_path,
  332. debug_mode=debug_mode,
  333. )
  334. # 通过规则识别到的footnote
  335. footnote_bboxes_by_rule = parse_footnotes_by_rule(
  336. remain_text_blocks, page_height, page_id, main_text_font
  337. )
  338. """进入pdf过滤器,去掉一些不合理的pdf"""
  339. is_good_pdf, err = pdf_filter(
  340. page, remain_text_blocks, table_bboxes, image_bboxes
  341. )
  342. if not is_good_pdf:
  343. logger.warning(
  344. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {err}"
  345. )
  346. if not debug_mode:
  347. return err
  348. """
  349. ==================================================================================================================================
  350. 进行版面布局切分和过滤
  351. """
  352. """在切分之前,先检查一下bbox是否有左右重叠的情况,如果有,那么就认为这个pdf暂时没有能力处理好,这种左右重叠的情况大概率是由于pdf里的行间公式、表格没有被正确识别出来造成的 """
  353. is_text_block_horz_overlap = check_text_block_horizontal_overlap(
  354. remain_text_blocks, header_bboxs, footer_bboxs
  355. )
  356. if is_text_block_horz_overlap:
  357. # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
  358. logger.warning(
  359. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}"
  360. )
  361. result = {
  362. "_need_drop": True,
  363. "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP,
  364. }
  365. if not debug_mode:
  366. return result
  367. """统一格式化成一个数据结构用于计算layout"""
  368. page_y0 = 0 if len(header_bboxs) == 0 else max([b[3] for b in header_bboxs])
  369. page_y1 = (
  370. page_height if len(footer_bboxs) == 0 else min([b[1] for b in footer_bboxs])
  371. )
  372. left_x, right_x = get_side_boundry(
  373. removed_non_horz_text_block, page_width, page_height
  374. )
  375. page_boundry = [
  376. math.floor(left_x),
  377. page_y0 + 1,
  378. math.ceil(right_x),
  379. page_y1 - 1,
  380. ]
  381. # 返回的是一个数组,每个元素[x0, y0, x1, y1, block_content, idx_x, idx_y], 初始时候idx_x, idx_y都是None. 对于图片、公式来说,block_content是图片的地址, 对于段落来说,block_content是段落的内容
  382. all_bboxes = prepare_bboxes_for_layout_split(
  383. image_info,
  384. image_backup_info,
  385. table_info,
  386. inline_eq_info,
  387. interline_eq_info,
  388. remain_text_blocks,
  389. page_boundry,
  390. page,
  391. )
  392. # debug_show_bbox(pdf_docs, page_id, [], [], all_bboxes, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 1)
  393. """page_y0, page_y1能够过滤掉页眉和页脚,不会算作layout内"""
  394. layout_bboxes, layout_tree = get_bboxes_layout(
  395. all_bboxes, page_boundry, page_id
  396. )
  397. if (
  398. len(remain_text_blocks) > 0
  399. and len(all_bboxes) > 0
  400. and len(layout_bboxes) == 0
  401. ):
  402. logger.warning(
  403. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}"
  404. )
  405. result = {
  406. "_need_drop": True,
  407. "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT,
  408. }
  409. if not debug_mode:
  410. return result
  411. """以下去掉复杂的布局和超过2列的布局"""
  412. if any(
  413. [lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]
  414. ): # 复杂的布局
  415. logger.warning(
  416. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.COMPLICATED_LAYOUT}"
  417. )
  418. result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
  419. if not debug_mode:
  420. return result
  421. layout_column_width = get_columns_cnt_of_layout(layout_tree)
  422. if layout_column_width > 2: # 去掉超过2列的布局pdf
  423. logger.warning(
  424. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}"
  425. )
  426. result = {
  427. "_need_drop": True,
  428. "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
  429. "extra_info": {"column_cnt": layout_column_width},
  430. }
  431. if not debug_mode:
  432. return result
  433. """
  434. ==================================================================================================================================
  435. 构造出下游需要的数据结构
  436. """
  437. remain_text_blocks = (
  438. remain_text_blocks + interline_eq_temp_text_block
  439. ) # 把计算layout时候临时删除的行间公式再放回去,防止行间公式替换的时候丢失。
  440. removed_text_blocks = []
  441. removed_text_blocks.extend(removed_hdr_foot_txt_block)
  442. # removed_text_blocks.extend(removed_footnote_text_block)
  443. removed_text_blocks.extend(text_block_on_image_removed)
  444. removed_text_blocks.extend(removed_non_horz_text_block)
  445. removed_text_blocks.extend(removed_colored_narrow_strip_background_text_block)
  446. removed_images = []
  447. # removed_images.extend(footnote_imgs)
  448. removed_images.extend(removed_hdr_foot_img_block)
  449. images_backup = []
  450. images_backup.extend(image_backup_info)
  451. remain_text_blocks = escape_special_markdown_char(
  452. remain_text_blocks
  453. ) # 转义span里的text
  454. sorted_text_remain_text_block = sort_text_block(
  455. remain_text_blocks, layout_bboxes
  456. )
  457. footnote_bboxes_tmp = []
  458. footnote_bboxes_tmp.extend(footnote_bboxes_by_model)
  459. footnote_bboxes_tmp.extend(footnote_bboxes_by_rule)
  460. page_info = construct_page_component(
  461. page_id,
  462. image_info,
  463. table_info,
  464. sorted_text_remain_text_block,
  465. layout_bboxes,
  466. inline_eq_info,
  467. interline_eq_info,
  468. page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"],
  469. removed_text_blocks=removed_text_blocks,
  470. removed_image_blocks=removed_images,
  471. images_backup=images_backup,
  472. droped_table_block=[],
  473. table_backup=[],
  474. layout_tree=layout_tree,
  475. page_w=page.rect.width,
  476. page_h=page.rect.height,
  477. footnote_bboxes_tmp=footnote_bboxes_tmp,
  478. )
  479. page_info["image_bboxes_with_caption"] = image_bboxes_with_caption # add by xr
  480. page_info["table_bboxes_with_caption"] = table_bboxes_with_caption
  481. page_info["bak_page_no_bboxes"] = page_no_bboxs
  482. page_info["bak_header_bboxes"] = header_bboxs
  483. page_info["bak_footer_bboxes"] = footer_bboxs
  484. page_info["bak_footer_note_bboxes"] = footnote_bboxes_tmp
  485. pdf_info_dict[f"page_{page_id}"] = page_info
  486. # end page for
  487. """计算后处理阶段耗时"""
  488. start_time = time.time()
  489. """
  490. ==================================================================================================================================
  491. 去掉页眉和页脚,这里需要用到一定的统计量,所以放到最后
  492. 页眉和页脚主要从文本box和图片box中去除,位于页面的四周。
  493. 下面函数会直接修改pdf_info_dict,从文字块中、图片中删除属于页眉页脚的内容,删除内容做相对应记录
  494. """
  495. # 去页眉页脚
  496. header, footer = drop_footer_header(
  497. pdf_info_dict
  498. ) # TODO: using header and footer boxes here !
  499. """对单个layout内footnote和他下面的所有textbbox合并"""
  500. for page_key, page_info in pdf_info_dict.items():
  501. page_info = merge_footnote_blocks(page_info, main_text_font)
  502. page_info = remove_footnote_blocks(page_info)
  503. pdf_info_dict[page_key] = page_info
  504. """进入pdf后置过滤器,去掉一些不合理的pdf"""
  505. i = 0
  506. for page_info in pdf_info_dict.values():
  507. is_good_pdf, err = pdf_post_filter(page_info)
  508. if not is_good_pdf:
  509. logger.warning(f"page_id: {i}, drop this pdf: {book_name}, reason: {err}")
  510. if not debug_mode:
  511. return err
  512. i += 1
  513. if debug_mode:
  514. params_file_save_path = join_path(
  515. save_tmp_path, "md", book_name, "preproc_out.json"
  516. )
  517. page_draw_rect_save_path = join_path(
  518. save_tmp_path, "md", book_name, "layout.pdf"
  519. )
  520. # dir_path = os.path.dirname(page_draw_rect_save_path)
  521. # if not os.path.exists(dir_path):
  522. # # 如果目录不存在,创建它
  523. # os.makedirs(dir_path)
  524. with open(params_file_save_path, "w", encoding="utf-8") as f:
  525. json.dump(pdf_info_dict, f, ensure_ascii=False, indent=4)
  526. # 先检测本地 page_draw_rect_save_path 是否存在,如果存在则删除
  527. if os.path.exists(page_draw_rect_save_path):
  528. os.remove(page_draw_rect_save_path)
  529. # 绘制bbox和layout到pdf
  530. draw_bbox_on_page(pdf_docs, pdf_info_dict, page_draw_rect_save_path)
  531. draw_layout_bbox_on_page(
  532. pdf_docs, pdf_info_dict, header, footer, page_draw_rect_save_path
  533. )
  534. if debug_mode:
  535. # 打印后处理阶段耗时
  536. logger.info(f"post_processing_time: {get_delta_time(start_time)}")
  537. """
  538. ==================================================================================================================================
  539. 进入段落处理-2阶段
  540. """
  541. # 处理行内文字间距较大问题
  542. pdf_info_dict = solve_inline_too_large_interval(pdf_info_dict)
  543. start_time = time.time()
  544. para_process_pipeline = ParaProcessPipeline()
  545. def _deal_with_text_exception(error_info):
  546. logger.warning(
  547. f"page_id: {page_id}, drop this pdf: {book_name}, reason: {error_info}"
  548. )
  549. if error_info == denseSingleLineBlockException_msg:
  550. logger.warning(
  551. f"Drop this pdf: {book_name}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}"
  552. )
  553. result = {
  554. "_need_drop": True,
  555. "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK,
  556. }
  557. return result
  558. if error_info == titleDetectionException_msg:
  559. logger.warning(
  560. f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_DETECTION_FAILED}"
  561. )
  562. result = {
  563. "_need_drop": True,
  564. "_drop_reason": DropReason.TITLE_DETECTION_FAILED,
  565. }
  566. return result
  567. elif error_info == titleLevelException_msg:
  568. logger.warning(
  569. f"Drop this pdf: {book_name}, reason: {DropReason.TITLE_LEVEL_FAILED}"
  570. )
  571. result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
  572. return result
  573. elif error_info == paraSplitException_msg:
  574. logger.warning(
  575. f"Drop this pdf: {book_name}, reason: {DropReason.PARA_SPLIT_FAILED}"
  576. )
  577. result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
  578. return result
  579. elif error_info == paraMergeException_msg:
  580. logger.warning(
  581. f"Drop this pdf: {book_name}, reason: {DropReason.PARA_MERGE_FAILED}"
  582. )
  583. result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
  584. return result
  585. if debug_mode:
  586. input_pdf_file = f"{pdf_local_path}.pdf"
  587. output_dir = f"{save_path}/{book_name}"
  588. output_pdf_file = f"{output_dir}/pdf_annos.pdf"
  589. """
  590. Call the para_process_pipeline function to process the pdf_info_dict.
  591. Parameters:
  592. para_debug_mode: str or None
  593. If para_debug_mode is None, the para_process_pipeline will not keep any intermediate results.
  594. If para_debug_mode is "simple", the para_process_pipeline will only keep the annos on the pdf and the final results as a json file.
  595. If para_debug_mode is "full", the para_process_pipeline will keep all the intermediate results generated during each step.
  596. """
  597. pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(
  598. pdf_info_dict,
  599. para_debug_mode="simple",
  600. input_pdf_path=input_pdf_file,
  601. output_pdf_path=output_pdf_file,
  602. )
  603. # 打印段落处理阶段耗时
  604. logger.info(f"para_process_time: {get_delta_time(start_time)}")
  605. # debug的时候不return drop信息
  606. if error_info is not None:
  607. _deal_with_text_exception(error_info)
  608. return pdf_info_dict
  609. else:
  610. pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(
  611. pdf_info_dict
  612. )
  613. if error_info is not None:
  614. return _deal_with_text_exception(error_info)
  615. return pdf_info_dict