# pdf_parse_by_txt.py
  1. import time
  2. # from anyio import Path
  3. from magic_pdf.libs.commons import fitz, get_delta_time, get_img_s3_client, get_docx_model_output
  4. import json
  5. import os
  6. import math
  7. from loguru import logger
  8. from magic_pdf.layout.bbox_sort import (
  9. prepare_bboxes_for_layout_split,
  10. )
  11. from magic_pdf.layout.layout_sort import LAYOUT_UNPROC, get_bboxes_layout, get_columns_cnt_of_layout, sort_text_block
  12. from magic_pdf.libs.convert_utils import dict_to_list
  13. from magic_pdf.libs.drop_reason import DropReason
  14. from magic_pdf.libs.hash_utils import compute_md5
  15. from magic_pdf.libs.markdown_utils import escape_special_markdown_char
  16. from magic_pdf.libs.safe_filename import sanitize_filename
  17. from magic_pdf.libs.vis_utils import draw_bbox_on_page, draw_layout_bbox_on_page
  18. from magic_pdf.pre_proc.detect_images import parse_images
  19. from magic_pdf.pre_proc.detect_tables import parse_tables # 获取tables的bbox
  20. from magic_pdf.pre_proc.detect_equation import parse_equations # 获取equations的bbox
  21. from magic_pdf.pre_proc.detect_header import parse_headers # 获取headers的bbox
  22. from magic_pdf.pre_proc.detect_page_number import parse_pageNos # 获取pageNos的bbox
  23. from magic_pdf.pre_proc.detect_footnote import parse_footnotes_by_model, parse_footnotes_by_rule # 获取footnotes的bbox
  24. from magic_pdf.pre_proc.detect_footer_by_model import parse_footers # 获取footers的bbox
  25. from magic_pdf.post_proc.detect_para import (
  26. ParaProcessPipeline,
  27. TitleDetectionException,
  28. TitleLevelException,
  29. ParaSplitException,
  30. ParaMergeException,
  31. DenseSingleLineBlockException,
  32. )
  33. from magic_pdf.pre_proc.main_text_font import get_main_text_font
  34. from magic_pdf.pre_proc.remove_colored_strip_bbox import remove_colored_strip_textblock
  35. from magic_pdf.pre_proc.remove_footer_header import remove_headder_footer_one_page
  36. '''
  37. from para.para_pipeline import ParaProcessPipeline
  38. from para.exceptions import (
  39. TitleDetectionException,
  40. TitleLevelException,
  41. ParaSplitException,
  42. ParaMergeException,
  43. DenseSingleLineBlockException,
  44. )
  45. '''
  46. from magic_pdf.libs.commons import read_file, join_path
  47. from magic_pdf.libs.pdf_image_tools import save_images_by_bboxes
  48. from magic_pdf.post_proc.remove_footnote import merge_footnote_blocks, remove_footnote_blocks
  49. from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
  50. from magic_pdf.pre_proc.equations_replace import combine_chars_to_pymudict, remove_chars_in_text_blocks, replace_equations_in_textblock
  51. from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
  52. from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
  53. from magic_pdf.pre_proc.construct_page_dict import construct_page_component
  54. from magic_pdf.pre_proc.fix_image import combine_images, fix_image_vertical, fix_seperated_image, include_img_title
  55. from magic_pdf.post_proc.pdf_post_filter import pdf_post_filter
  56. from magic_pdf.pre_proc.remove_rotate_bbox import get_side_boundry, remove_rotate_side_textblock, remove_side_blank_block
  57. from magic_pdf.pre_proc.resolve_bbox_conflict import check_text_block_horizontal_overlap, resolve_bbox_overlap_conflict
  58. from magic_pdf.pre_proc.fix_table import fix_table_text_block, fix_tables, include_table_title
  59. from magic_pdf.pre_proc.solve_line_alien import solve_inline_too_large_interval
# Cache the canonical message text of each paragraph-pipeline exception once
# at import time. parse_pdf_by_txt compares the error string returned by
# ParaProcessPipeline against these to decide which DropReason to report.
denseSingleLineBlockException_msg = DenseSingleLineBlockException().message
titleDetectionException_msg = TitleDetectionException().message
titleLevelException_msg = TitleLevelException().message
paraSplitException_msg = ParaSplitException().message
paraMergeException_msg = ParaMergeException().message
  65. def parse_pdf_by_txt(
  66. pdf_bytes,
  67. pdf_model_output,
  68. imageWriter,
  69. start_page_id=0,
  70. end_page_id=None,
  71. debug_mode=False,
  72. ):
  73. pdf_bytes_md5 = compute_md5(pdf_bytes)
  74. pdf_docs = fitz.open("pdf", pdf_bytes)
  75. pdf_info_dict = {}
  76. start_time = time.time()
  77. """通过统计pdf全篇文字,识别正文字体"""
  78. main_text_font = get_main_text_font(pdf_docs)
  79. end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
  80. for page_id in range(start_page_id, end_page_id + 1):
  81. page = pdf_docs[page_id]
  82. page_width = page.rect.width
  83. page_height = page.rect.height
  84. if debug_mode:
  85. time_now = time.time()
  86. logger.info(f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}")
  87. start_time = time_now
  88. """
  89. # 通过一个规则,过滤掉单页超过1500非junkimg的pdf
  90. # 对单页面非重复id的img数量做统计,如果当前页超过1500则直接return need_drop
  91. """
  92. page_imgs = page.get_images()
  93. # 去除对junkimg的依赖,简化逻辑
  94. if len(page_imgs) > 1500: # 如果当前页超过1500张图片,直接跳过
  95. logger.warning(f"page_id: {page_id}, img_counts: {len(page_imgs)}, drop this pdf")
  96. result = {"_need_drop": True, "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
  97. if not debug_mode:
  98. return result
  99. """
  100. ==================================================================================================================================
  101. 首先获取基本的block数据,对pdf进行分解,获取图片、表格、公式、text的bbox
  102. """
  103. # 解析pdf原始文本block
  104. text_raw_blocks = page.get_text(
  105. "dict",
  106. flags=fitz.TEXTFLAGS_TEXT,
  107. )["blocks"]
  108. model_output_json = get_docx_model_output(pdf_model_output, page_id)
  109. # 解析图片
  110. image_bboxes = parse_images(page_id, page, model_output_json)
  111. image_bboxes = fix_image_vertical(image_bboxes, text_raw_blocks) # 修正图片的位置
  112. image_bboxes = fix_seperated_image(image_bboxes) # 合并有边重合的图片
  113. image_bboxes = include_img_title(text_raw_blocks, image_bboxes) # 向图片上方和下方寻找title,使用规则进行匹配,暂时只支持英文规则
  114. """此时image_bboxes中可能出现这种情况,水平并列的2个图片,下方分别有各自的子标题,2个子标题下方又有大标题(形如Figxxx),会出现2个图片的bbox都包含了这个大标题,这种情况需要把图片合并"""
  115. image_bboxes = combine_images(image_bboxes) # 合并图片
  116. # 解析表格并对table_bboxes进行位置的微调,防止表格周围的文字被截断
  117. table_bboxes = parse_tables(page_id, page, model_output_json)
  118. table_bboxes = fix_tables(page, table_bboxes, include_table_title=True, scan_line_num=2) # 修正
  119. table_bboxes = fix_table_text_block(text_raw_blocks, table_bboxes) # 修正与text block的关系,某些table修正与pymupdf获取到的table内textblock没有完全包含,因此要进行一次修正。
  120. #debug_show_bbox(pdf_docs, page_id, table_bboxes, [], [b['bbox'] for b in text_raw_blocks], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
  121. table_bboxes = include_table_title(text_raw_blocks, table_bboxes) # 向table上方和下方寻找title,使用规则进行匹配,暂时只支持英文规则
  122. # 解析公式
  123. equations_inline_bboxes, equations_interline_bboxes = parse_equations(page_id, page, model_output_json)
  124. """
  125. ==================================================================================================================================
  126. 进入预处理-1阶段
  127. -------------------
  128. # # 解析标题
  129. # title_bboxs = parse_titles(page_id, page, model_output_json)
  130. # # 评估Layout是否规整、简单
  131. # isSimpleLayout_flag, fullColumn_cnt, subColumn_cnt, curPage_loss = evaluate_pdf_layout(page_id, page, model_output_json)
  132. 接下来开始进行预处理过程
  133. """
  134. """去掉每页的页码、页眉、页脚"""
  135. page_no_bboxs = parse_pageNos(page_id, page, model_output_json)
  136. header_bboxs = parse_headers(page_id, page, model_output_json)
  137. footer_bboxs = parse_footers(page_id, page, model_output_json)
  138. image_bboxes, table_bboxes, remain_text_blocks, removed_hdr_foot_txt_block, removed_hdr_foot_img_block, removed_hdr_foot_table = remove_headder_footer_one_page(text_raw_blocks, image_bboxes, table_bboxes, header_bboxs, footer_bboxs, page_no_bboxs, page_width, page_height)
  139. """去除页面上半部分长条色块内的文本块"""
  140. remain_text_blocks, removed_colored_narrow_strip_background_text_block = remove_colored_strip_textblock(remain_text_blocks, page)
  141. #debug_show_bbox(pdf_docs, page_id, footnote_bboxes_by_model, [b['bbox'] for b in remain_text_blocks], header_bboxs, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
  142. """去掉旋转的文字:水印、垂直排列的文字"""
  143. remain_text_blocks, removed_non_horz_text_block = remove_rotate_side_textblock(
  144. remain_text_blocks, page_width, page_height
  145. ) # 去掉水印,非水平文字
  146. remain_text_blocks, removed_empty_side_block = remove_side_blank_block(remain_text_blocks, page_width, page_height) # 删除页面四周可能会留下的完全空白的textblock,这种block形成原因未知
  147. """出现在图片、表格上的文字块去掉,把层叠的图片单独分离出来,不参与layout的计算"""
  148. (
  149. image_bboxes,
  150. table_bboxes,
  151. equations_interline_bboxes,
  152. equations_inline_bboxes,
  153. remain_text_blocks,
  154. text_block_on_image_removed,
  155. images_overlap_backup,
  156. interline_eq_temp_text_block
  157. ) = resolve_bbox_overlap_conflict(
  158. image_bboxes, table_bboxes, equations_interline_bboxes, equations_inline_bboxes, remain_text_blocks
  159. )
  160. # """去掉footnote, 从文字和图片中"""
  161. # # 通过模型识别到的footnote
  162. # footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, md_bookname_save_path,
  163. # debug_mode=debug_mode)
  164. # # 通过规则识别到的footnote
  165. # footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id)
  166. """
  167. ==================================================================================================================================
  168. """
  169. # 把图、表、公式都进行截图,保存到存储上,返回图片路径作为内容
  170. image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info = save_images_by_bboxes(
  171. page_id,
  172. page,
  173. pdf_bytes_md5,
  174. image_bboxes,
  175. images_overlap_backup,
  176. table_bboxes,
  177. equations_inline_bboxes,
  178. equations_interline_bboxes,
  179. imageWriter
  180. ) # 只要表格和图片的截图
  181. """"以下进入到公式替换环节 """
  182. char_level_text_blocks = page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)['blocks']
  183. remain_text_blocks = combine_chars_to_pymudict(remain_text_blocks, char_level_text_blocks)# 合并chars
  184. remain_text_blocks = replace_equations_in_textblock(remain_text_blocks, inline_eq_info, interline_eq_info)
  185. remain_text_blocks = remove_citation_marker(remain_text_blocks) # 公式替换之后去角标,防止公式无法替换成功。但是这样也会带来个问题就是把角标当公式。各有优劣。
  186. remain_text_blocks = remove_chars_in_text_blocks(remain_text_blocks) # 减少中间态数据体积
  187. #debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in inline_eq_info], [b['bbox'] for b in interline_eq_info], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 3)
  188. """去掉footnote, 从文字和图片中(先去角标再去footnote试试)"""
  189. # 通过模型识别到的footnote
  190. footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, debug_mode=debug_mode)
  191. # 通过规则识别到的footnote
  192. footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id, main_text_font)
  193. """进入pdf过滤器,去掉一些不合理的pdf"""
  194. is_good_pdf, err = pdf_filter(page, remain_text_blocks, table_bboxes, image_bboxes)
  195. if not is_good_pdf:
  196. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
  197. if not debug_mode:
  198. return err
  199. """
  200. ==================================================================================================================================
  201. 进行版面布局切分和过滤
  202. """
  203. """在切分之前,先检查一下bbox是否有左右重叠的情况,如果有,那么就认为这个pdf暂时没有能力处理好,这种左右重叠的情况大概率是由于pdf里的行间公式、表格没有被正确识别出来造成的 """
  204. is_text_block_horz_overlap = check_text_block_horizontal_overlap(remain_text_blocks, header_bboxs, footer_bboxs)
  205. if is_text_block_horz_overlap:
  206. # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
  207. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}")
  208. result = {"_need_drop": True, "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
  209. if not debug_mode:
  210. return result
  211. """统一格式化成一个数据结构用于计算layout"""
  212. page_y0 = 0 if len(header_bboxs) == 0 else max([b[3] for b in header_bboxs])
  213. page_y1 = page_height if len(footer_bboxs) == 0 else min([b[1] for b in footer_bboxs])
  214. left_x, right_x = get_side_boundry(removed_non_horz_text_block, page_width, page_height)
  215. page_boundry = [math.floor(left_x), page_y0 + 1, math.ceil(right_x), page_y1 - 1]
  216. # 返回的是一个数组,每个元素[x0, y0, x1, y1, block_content, idx_x, idx_y], 初始时候idx_x, idx_y都是None. 对于图片、公式来说,block_content是图片的地址, 对于段落来说,block_content是段落的内容
  217. all_bboxes = prepare_bboxes_for_layout_split(
  218. image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info, remain_text_blocks, page_boundry, page)
  219. #debug_show_bbox(pdf_docs, page_id, [], [], all_bboxes, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 1)
  220. """page_y0, page_y1能够过滤掉页眉和页脚,不会算作layout内"""
  221. layout_bboxes, layout_tree = get_bboxes_layout(all_bboxes, page_boundry, page_id)
  222. if len(remain_text_blocks)>0 and len(all_bboxes)>0 and len(layout_bboxes)==0:
  223. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}")
  224. result = {"_need_drop": True, "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
  225. if not debug_mode:
  226. return result
  227. """以下去掉复杂的布局和超过2列的布局"""
  228. if any([lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]): # 复杂的布局
  229. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.COMPLICATED_LAYOUT}")
  230. result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
  231. if not debug_mode:
  232. return result
  233. layout_column_width = get_columns_cnt_of_layout(layout_tree)
  234. if layout_column_width > 2: # 去掉超过2列的布局pdf
  235. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}")
  236. result = {
  237. "_need_drop": True,
  238. "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
  239. "extra_info": {"column_cnt": layout_column_width},
  240. }
  241. if not debug_mode:
  242. return result
  243. """
  244. ==================================================================================================================================
  245. 构造出下游需要的数据结构
  246. """
  247. remain_text_blocks = remain_text_blocks + interline_eq_temp_text_block # 把计算layout时候临时删除的行间公式再放回去,防止行间公式替换的时候丢失。
  248. removed_text_blocks = []
  249. removed_text_blocks.extend(removed_hdr_foot_txt_block)
  250. # removed_text_blocks.extend(removed_footnote_text_block)
  251. removed_text_blocks.extend(text_block_on_image_removed)
  252. removed_text_blocks.extend(removed_non_horz_text_block)
  253. removed_text_blocks.extend(removed_colored_narrow_strip_background_text_block)
  254. removed_images = []
  255. # removed_images.extend(footnote_imgs)
  256. removed_images.extend(removed_hdr_foot_img_block)
  257. images_backup = []
  258. images_backup.extend(image_backup_info)
  259. remain_text_blocks = escape_special_markdown_char(remain_text_blocks) # 转义span里的text
  260. sorted_text_remain_text_block = sort_text_block(remain_text_blocks, layout_bboxes)
  261. footnote_bboxes_tmp = []
  262. footnote_bboxes_tmp.extend(footnote_bboxes_by_model)
  263. footnote_bboxes_tmp.extend(footnote_bboxes_by_rule)
  264. page_info = construct_page_component(
  265. page_id,
  266. image_info,
  267. table_info,
  268. sorted_text_remain_text_block,
  269. layout_bboxes,
  270. inline_eq_info,
  271. interline_eq_info,
  272. page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"],
  273. removed_text_blocks=removed_text_blocks,
  274. removed_image_blocks=removed_images,
  275. images_backup=images_backup,
  276. droped_table_block=[],
  277. table_backup=[],
  278. layout_tree=layout_tree,
  279. page_w=page.rect.width,
  280. page_h=page.rect.height,
  281. footnote_bboxes_tmp=footnote_bboxes_tmp
  282. )
  283. pdf_info_dict[f"page_{page_id}"] = page_info
  284. # end page for
  285. '''计算后处理阶段耗时'''
  286. start_time = time.time()
  287. """
  288. ==================================================================================================================================
  289. 去掉页眉和页脚,这里需要用到一定的统计量,所以放到最后
  290. 页眉和页脚主要从文本box和图片box中去除,位于页面的四周。
  291. 下面函数会直接修改pdf_info_dict,从文字块中、图片中删除属于页眉页脚的内容,删除内容做相对应记录
  292. """
  293. # 去页眉页脚
  294. header, footer = drop_footer_header(pdf_info_dict)
  295. """对单个layout内footnote和他下面的所有textbbox合并"""
  296. for page_key, page_info in pdf_info_dict.items():
  297. page_info = merge_footnote_blocks(page_info, main_text_font)
  298. page_info = remove_footnote_blocks(page_info)
  299. pdf_info_dict[page_key] = page_info
  300. """进入pdf后置过滤器,去掉一些不合理的pdf"""
  301. i = 0
  302. for page_info in pdf_info_dict.values():
  303. is_good_pdf, err = pdf_post_filter(page_info)
  304. if not is_good_pdf:
  305. logger.warning(f"page_id: {i}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
  306. if not debug_mode:
  307. return err
  308. i += 1
  309. if debug_mode:
  310. # 打印后处理阶段耗时
  311. logger.info(f"post_processing_time: {get_delta_time(start_time)}")
  312. """
  313. ==================================================================================================================================
  314. 进入段落处理-2阶段
  315. """
  316. # 处理行内文字间距较大问题
  317. pdf_info_dict = solve_inline_too_large_interval(pdf_info_dict)
  318. start_time = time.time()
  319. para_process_pipeline = ParaProcessPipeline()
  320. def _deal_with_text_exception(error_info):
  321. logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {error_info}")
  322. if error_info == denseSingleLineBlockException_msg:
  323. logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}")
  324. result = {"_need_drop": True, "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
  325. return result
  326. if error_info == titleDetectionException_msg:
  327. logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_DETECTION_FAILED}")
  328. result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_DETECTION_FAILED}
  329. return result
  330. elif error_info == titleLevelException_msg:
  331. logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_LEVEL_FAILED}")
  332. result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
  333. return result
  334. elif error_info == paraSplitException_msg:
  335. logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_SPLIT_FAILED}")
  336. result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
  337. return result
  338. elif error_info == paraMergeException_msg:
  339. logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_MERGE_FAILED}")
  340. result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
  341. return result
  342. pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(pdf_info_dict)
  343. if error_info is not None:
  344. return _deal_with_text_exception(error_info)
  345. """dict转list"""
  346. pdf_info_list = dict_to_list(pdf_info_dict)
  347. new_pdf_info_dict = {
  348. "pdf_info": pdf_info_list,
  349. }
  350. return new_pdf_info_dict