ocr_mkcontent.py

import re

import wordninja
from loguru import logger

from magic_pdf.libs.commons import join_path
from magic_pdf.libs.language import detect_lang
from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
from magic_pdf.libs.markdown_utils import ocr_escape_special_markdown_char
from magic_pdf.libs.ocr_content_type import BlockType, ContentType
from magic_pdf.para.para_split_v3 import ListLineTag


def __is_hyphen_at_line_end(line):
    """
    Check if a line ends with one or more letters followed by a hyphen.

    Args:
        line (str): The line of text to check.

    Returns:
        bool: True if the line ends with one or more letters followed by a hyphen, False otherwise.
    """
    # Use regex to check if the line ends with one or more letters followed by a hyphen
    return bool(re.search(r'[A-Za-z]+-\s*$', line))
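
# Illustrative behaviour (a sketch, not part of the original module): only a hyphen that
# directly follows letters at the very end of the line counts as a soft line-break hyphen.
#   __is_hyphen_at_line_end('a hyphen-')          -> True   (trailing whitespace is allowed)
#   __is_hyphen_at_line_end('a well-known term')  -> False  (hyphen is not at line end)
#   __is_hyphen_at_line_end('figure 12-')         -> False  (digits precede the hyphen)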


def split_long_words(text):
    segments = text.split(' ')
    for i in range(len(segments)):
        words = re.findall(r'\w+|[^\w]', segments[i], re.UNICODE)
        for j in range(len(words)):
            if len(words[j]) > 10:
                words[j] = ' '.join(wordninja.split(words[j]))
        segments[i] = ''.join(words)
    return ' '.join(segments)
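
# A minimal sketch of the intended effect (the exact split depends on wordninja's
# word-frequency model, so the result shown is indicative rather than guaranteed):
#   split_long_words('ocr output wordsgluedtogether')
#   -> 'ocr output words glued together'
# Only tokens longer than 10 characters are handed to wordninja; shorter ones pass through.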


def ocr_mk_mm_markdown_with_para(pdf_info_list: list, img_buket_path):
    markdown = []
    for page_info in pdf_info_list:
        paras_of_layout = page_info.get('para_blocks')
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'mm', img_buket_path)
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_nlp_markdown_with_para(pdf_info_dict: list):
    markdown = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'nlp')
        markdown.extend(page_markdown)
    return '\n\n'.join(markdown)


def ocr_mk_mm_markdown_with_para_and_pagination(pdf_info_dict: list,
                                                img_buket_path):
    markdown_with_para_and_pagination = []
    page_no = 0
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        if not paras_of_layout:
            continue
        page_markdown = ocr_mk_markdown_with_para_core_v2(
            paras_of_layout, 'mm', img_buket_path)
        markdown_with_para_and_pagination.append({
            'page_no': page_no,
            'md_content': '\n\n'.join(page_markdown),
        })
        page_no += 1
    return markdown_with_para_and_pagination
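
# Shape of the return value (illustrative values only):
#   [{'page_no': 0, 'md_content': '# Title\n\nFirst paragraph ...'},
#    {'page_no': 1, 'md_content': 'Next page ...'}]
# Note that page_no counts only pages that produced blocks, not physical page indices.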


def ocr_mk_markdown_with_para_core(paras_of_layout, mode, img_buket_path=''):
    page_markdown = []
    for paras in paras_of_layout:
        for para in paras:
            para_text = ''
            for line in para:
                for span in line['spans']:
                    span_type = span.get('type')
                    content = ''
                    language = ''
                    if span_type == ContentType.Text:
                        content = span['content']
                        language = detect_lang(content)
                        if language == 'en':  # only split long English words; splitting Chinese would lose text
                            content = ocr_escape_special_markdown_char(
                                split_long_words(content))
                        else:
                            content = ocr_escape_special_markdown_char(content)
                    elif span_type == ContentType.InlineEquation:
                        content = f"${span['content']}$"
                    elif span_type == ContentType.InterlineEquation:
                        content = f"\n$$\n{span['content']}\n$$\n"
                    elif span_type in [ContentType.Image, ContentType.Table]:
                        if mode == 'mm':
                            content = f"\n![]({join_path(img_buket_path, span['image_path'])})\n"
                        elif mode == 'nlp':
                            pass
                    if content != '':
                        if language == 'en':  # English text needs a space between spans
                            para_text += content + ' '
                        else:  # Chinese text needs no space between spans
                            para_text += content
            if para_text.strip() == '':
                continue
            else:
                page_markdown.append(para_text.strip() + ' ')
    return page_markdown


def ocr_mk_markdown_with_para_core_v2(paras_of_layout,
                                      mode,
                                      img_buket_path='',
                                      parse_type="auto",
                                      lang=None
                                      ):
    page_markdown = []
    for para_block in paras_of_layout:
        para_text = ''
        para_type = para_block['type']
        if para_type in [BlockType.Text, BlockType.List, BlockType.Index]:
            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Title:
            para_text = f'# {merge_para_with_text(para_block, parse_type=parse_type, lang=lang)}'
        elif para_type == BlockType.InterlineEquation:
            para_text = merge_para_with_text(para_block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Image:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                for block in para_block['blocks']:  # 1st: assemble image_body
                    if block['type'] == BlockType.ImageBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Image:
                                    para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 2nd: assemble image_caption
                    if block['type'] == BlockType.ImageCaption:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 3rd: assemble image_footnote
                    if block['type'] == BlockType.ImageFootnote:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
        elif para_type == BlockType.Table:
            if mode == 'nlp':
                continue
            elif mode == 'mm':
                for block in para_block['blocks']:  # 1st: assemble table_caption
                    if block['type'] == BlockType.TableCaption:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
                for block in para_block['blocks']:  # 2nd: assemble table_body
                    if block['type'] == BlockType.TableBody:
                        for line in block['lines']:
                            for span in line['spans']:
                                if span['type'] == ContentType.Table:
                                    # if processed by table model
                                    if span.get('latex', ''):
                                        para_text += f"\n\n$\n {span['latex']}\n$\n\n"
                                    elif span.get('html', ''):
                                        para_text += f"\n\n{span['html']}\n\n"
                                    else:
                                        para_text += f"\n![]({join_path(img_buket_path, span['image_path'])}) \n"
                for block in para_block['blocks']:  # 3rd: assemble table_footnote
                    if block['type'] == BlockType.TableFootnote:
                        para_text += merge_para_with_text(block, parse_type=parse_type, lang=lang)
        if para_text.strip() == '':
            continue
        else:
            page_markdown.append(para_text.strip() + ' ')
    return page_markdown
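
# Each element of the returned page_markdown list is one block rendered as markdown text,
# e.g. (illustrative only): '# Section heading', 'A body paragraph ...', or, in 'mm' mode,
# an image reference such as '![](<img_buket_path>/xxx.jpg)' followed by its caption, and a
# table body emitted as LaTeX, HTML, or an image reference depending on what the table
# model produced.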


def detect_language(text):
    en_pattern = r'[a-zA-Z]+'
    en_matches = re.findall(en_pattern, text)
    en_length = sum(len(match) for match in en_matches)
    if len(text) > 0:
        if en_length / len(text) >= 0.5:
            return 'en'
        else:
            return 'unknown'
    else:
        return 'empty'
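
# Rough behaviour sketch: the share of ASCII letters among all characters decides the label.
#   detect_language('hello world')  -> 'en'       (10 letters out of 11 characters)
#   detect_language('你好世界 ok')    -> 'unknown'  (2 letters out of 7 characters)
#   detect_language('')             -> 'empty'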


def merge_para_with_text(para_block, parse_type="auto", lang=None):
    para_text = ''
    for i, line in enumerate(para_block['lines']):
        if i >= 1 and line.get(ListLineTag.IS_LIST_START_LINE, False):
            para_text += ' \n'
        line_text = ''
        line_lang = ''
        for span in line['spans']:
            span_type = span['type']
            if span_type == ContentType.Text:
                line_text += span['content'].strip()
        if line_text != '':
            line_lang = detect_lang(line_text)
        for span in line['spans']:
            span_type = span['type']
            content = ''
            if span_type == ContentType.Text:
                content = span['content']
                # language = detect_lang(content)
                language = detect_language(content)
                # check whether the target language is a non-English ("minor") language
                if lang is not None and lang != 'en':
                    content = ocr_escape_special_markdown_char(content)
                else:  # not a minor language
                    if language == 'en' and parse_type == 'ocr':  # only split long English words; splitting Chinese would lose text
                        content = ocr_escape_special_markdown_char(
                            split_long_words(content))
                    else:
                        content = ocr_escape_special_markdown_char(content)
            elif span_type == ContentType.InlineEquation:
                content = f" ${span['content']}$ "
            elif span_type == ContentType.InterlineEquation:
                content = f"\n$$\n{span['content']}\n$$\n"
            if content != '':
                langs = ['zh', 'ja', 'ko']
                if line_lang in langs:  # some documents put one character per span; per-span detection is unreliable, so judge language by the whole line
                    para_text += content  # no space between spans for Chinese/Japanese/Korean
                elif line_lang == 'en':
                    # if the line ends with a hyphenated word break, drop the hyphen and join without a space
                    if __is_hyphen_at_line_end(content):
                        para_text += content[:-1]
                    else:
                        para_text += content + ' '
                else:
                    para_text += content + ' '  # Western-script text needs a space between spans
    return para_text
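
# A minimal sketch of the expected input, inferred from the accesses above (any field not
# read by this function is omitted); the exact spacing of the output is indicative:
#   para_block = {'lines': [{'spans': [
#       {'type': ContentType.Text, 'content': 'The energy is'},
#       {'type': ContentType.InlineEquation, 'content': 'E=mc^2'},
#   ]}]}
#   merge_para_with_text(para_block)  ->  'The energy is  $E=mc^2$  '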


def para_to_standard_format(para, img_buket_path):
    para_content = {}
    if len(para) == 1:
        para_content = line_to_standard_format(para[0], img_buket_path)
    elif len(para) > 1:
        para_text = ''
        inline_equation_num = 0
        for line in para:
            for span in line['spans']:
                language = ''
                span_type = span.get('type')
                content = ''
                if span_type == ContentType.Text:
                    content = span['content']
                    language = detect_lang(content)
                    if language == 'en':  # only split long English words; splitting Chinese would lose text
                        content = ocr_escape_special_markdown_char(
                            split_long_words(content))
                    else:
                        content = ocr_escape_special_markdown_char(content)
                elif span_type == ContentType.InlineEquation:
                    content = f"${span['content']}$"
                    inline_equation_num += 1
                if language == 'en':  # English text needs a space between spans
                    para_text += content + ' '
                else:  # Chinese text needs no space between spans
                    para_text += content
        para_content = {
            'type': 'text',
            'text': para_text,
            'inline_equation_num': inline_equation_num,
        }
    return para_content


def para_to_standard_format_v2(para_block, img_buket_path, page_idx, parse_type="auto", lang=None, drop_reason=None):
    para_type = para_block['type']
    para_content = {}
    if para_type == BlockType.Text:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
        }
    elif para_type == BlockType.Title:
        para_content = {
            'type': 'text',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
            'text_level': 1,
        }
    elif para_type == BlockType.InterlineEquation:
        para_content = {
            'type': 'equation',
            'text': merge_para_with_text(para_block, parse_type=parse_type, lang=lang),
            'text_format': 'latex',
        }
    elif para_type == BlockType.Image:
        para_content = {'type': 'image'}
        for block in para_block['blocks']:
            if block['type'] == BlockType.ImageBody:
                para_content['img_path'] = join_path(
                    img_buket_path,
                    block['lines'][0]['spans'][0]['image_path'])
            if block['type'] == BlockType.ImageCaption:
                para_content['img_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
            if block['type'] == BlockType.ImageFootnote:
                para_content['img_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
    elif para_type == BlockType.Table:
        para_content = {'type': 'table'}
        for block in para_block['blocks']:
            if block['type'] == BlockType.TableBody:
                if block['lines'][0]['spans'][0].get('latex', ''):
                    para_content['table_body'] = f"\n\n$\n {block['lines'][0]['spans'][0]['latex']}\n$\n\n"
                elif block['lines'][0]['spans'][0].get('html', ''):
                    para_content['table_body'] = f"\n\n{block['lines'][0]['spans'][0]['html']}\n\n"
                para_content['img_path'] = join_path(img_buket_path, block['lines'][0]['spans'][0]['image_path'])
            if block['type'] == BlockType.TableCaption:
                para_content['table_caption'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
            if block['type'] == BlockType.TableFootnote:
                para_content['table_footnote'] = merge_para_with_text(block, parse_type=parse_type, lang=lang)
    para_content['page_idx'] = page_idx
    if drop_reason is not None:
        para_content['drop_reason'] = drop_reason
    return para_content
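
# Output sketch (illustrative; keys depend on the block type and on which sub-blocks exist):
#   Text block   -> {'type': 'text', 'text': '...', 'page_idx': 3}
#   Title block  -> {'type': 'text', 'text': '...', 'text_level': 1, 'page_idx': 3}
#   Image block  -> {'type': 'image', 'img_path': '<img_buket_path>/xxx.jpg',
#                    'img_caption': '...', 'img_footnote': '...', 'page_idx': 3}
#   Table block  -> {'type': 'table', 'table_body': '...', 'img_path': '...',
#                    'table_caption': '...', 'table_footnote': '...', 'page_idx': 3}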


def make_standard_format_with_para(pdf_info_dict: list, img_buket_path: str):
    content_list = []
    for page_info in pdf_info_dict:
        paras_of_layout = page_info.get('para_blocks')
        if not paras_of_layout:
            continue
        for para_block in paras_of_layout:
            # page_idx is required by para_to_standard_format_v2; take it from the
            # page record, as union_make does.
            para_content = para_to_standard_format_v2(
                para_block, img_buket_path, page_info.get('page_idx'))
            content_list.append(para_content)
    return content_list


def line_to_standard_format(line, img_buket_path):
    line_text = ''
    inline_equation_num = 0
    for span in line['spans']:
        if not span.get('content'):
            if not span.get('image_path'):
                continue
            else:
                if span['type'] == ContentType.Image:
                    content = {
                        'type': 'image',
                        'img_path': join_path(img_buket_path,
                                              span['image_path']),
                    }
                    return content
                elif span['type'] == ContentType.Table:
                    content = {
                        'type': 'table',
                        'img_path': join_path(img_buket_path,
                                              span['image_path']),
                    }
                    return content
        else:
            if span['type'] == ContentType.InterlineEquation:
                interline_equation = span['content']
                content = {
                    'type': 'equation',
                    'latex': f'$$\n{interline_equation}\n$$'
                }
                return content
            elif span['type'] == ContentType.InlineEquation:
                inline_equation = span['content']
                line_text += f'${inline_equation}$'
                inline_equation_num += 1
            elif span['type'] == ContentType.Text:
                text_content = ocr_escape_special_markdown_char(
                    span['content'])  # escape special markdown characters
                line_text += text_content
    content = {
        'type': 'text',
        'text': line_text,
        'inline_equation_num': inline_equation_num,
    }
    return content


def ocr_mk_mm_standard_format(pdf_info_dict: list, img_buket_path: str = ''):
    """Build a standard-format content_list.

    Fields of each item:
        type      string  image/text/table/equation (interline equations are standalone
                          items; inline equations are merged into the text).
        latex     string  LaTeX text field.
        text      string  plain-text data.
        md        string  markdown-formatted text data.
        img_path  string  s3://full/path/to/img.jpg.
    """
    content_list = []
    for page_info in pdf_info_dict:
        blocks = page_info.get('preproc_blocks')
        if not blocks:
            continue
        for block in blocks:
            for line in block['lines']:
                content = line_to_standard_format(line, img_buket_path)
                content_list.append(content)
    return content_list


def union_make(pdf_info_dict: list,
               make_mode: str,
               drop_mode: str,
               img_buket_path: str = '',
               parse_type: str = "auto",
               lang=None):
    output_content = []
    for page_info in pdf_info_dict:
        drop_reason_flag = False
        drop_reason = None
        if page_info.get('need_drop', False):
            drop_reason = page_info.get('drop_reason')
            if drop_mode == DropMode.NONE:
                pass
            elif drop_mode == DropMode.NONE_WITH_REASON:
                drop_reason_flag = True
            elif drop_mode == DropMode.WHOLE_PDF:
                raise Exception(f'drop_mode is {DropMode.WHOLE_PDF}, '
                                f'drop_reason is {drop_reason}')
            elif drop_mode == DropMode.SINGLE_PAGE:
                logger.warning(f'drop_mode is {DropMode.SINGLE_PAGE}, '
                               f'drop_reason is {drop_reason}')
                continue
            else:
                raise Exception('drop_mode can not be null')
        paras_of_layout = page_info.get('para_blocks')
        page_idx = page_info.get('page_idx')
        if not paras_of_layout:
            continue
        if make_mode == MakeMode.MM_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(
                paras_of_layout, 'mm', img_buket_path, parse_type=parse_type, lang=lang)
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.NLP_MD:
            page_markdown = ocr_mk_markdown_with_para_core_v2(
                paras_of_layout, 'nlp', parse_type=parse_type, lang=lang)
            output_content.extend(page_markdown)
        elif make_mode == MakeMode.STANDARD_FORMAT:
            for para_block in paras_of_layout:
                if drop_reason_flag:
                    para_content = para_to_standard_format_v2(
                        para_block, img_buket_path, page_idx, parse_type=parse_type,
                        lang=lang, drop_reason=drop_reason)
                else:
                    para_content = para_to_standard_format_v2(
                        para_block, img_buket_path, page_idx, parse_type=parse_type, lang=lang)
                output_content.append(para_content)
    if make_mode in [MakeMode.MM_MD, MakeMode.NLP_MD]:
        return '\n\n'.join(output_content)
    elif make_mode == MakeMode.STANDARD_FORMAT:
        return output_content
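

if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module). Real input comes from the
    # magic_pdf pipeline; the hand-built page below is an assumption kept just detailed
    # enough for union_make to run end to end.
    demo_page = {
        'page_idx': 0,
        'para_blocks': [{
            'type': BlockType.Text,
            'lines': [{'spans': [{'type': ContentType.Text,
                                  'content': 'Hello, world.'}]}],
        }],
    }
    md = union_make([demo_page], MakeMode.NLP_MD, DropMode.NONE)
    logger.info(md)  # expected to log the paragraph text as a markdown string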