demo.py

# Copyright (c) Opendatalab. All rights reserved.
import copy
import json
import os
from pathlib import Path

from loguru import logger

from mineru.cli.common import convert_pdf_bytes_to_bytes_by_pypdfium2, prepare_env, read_fn
from mineru.data.data_reader_writer import FileBasedDataWriter
from mineru.utils.draw_bbox import draw_layout_bbox, draw_span_bbox
from mineru.utils.enum_class import MakeMode
from mineru.backend.vlm.vlm_analyze import doc_analyze as vlm_doc_analyze
from mineru.backend.pipeline.pipeline_analyze import doc_analyze as pipeline_doc_analyze
from mineru.backend.pipeline.pipeline_middle_json_mkcontent import union_make as pipeline_union_make
from mineru.backend.pipeline.model_json_to_middle_json import result_to_middle_json as pipeline_result_to_middle_json
from mineru.backend.vlm.vlm_middle_json_mkcontent import union_make as vlm_union_make
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
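

# MinerU demo: parse PDFs (or images) with either the local "pipeline" backend or a
# VLM backend ("vlm-transformers", "vlm-sglang-engine", "vlm-sglang-client"), then dump
# markdown, middle/model/content-list JSON, the original PDF, and optional layout/span
# bounding-box visualizations into the output directory.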
def do_parse(
    output_dir,  # Output directory for storing parsing results
    pdf_file_names: list[str],  # List of PDF file names to be parsed
    pdf_bytes_list: list[bytes],  # List of PDF bytes to be parsed
    p_lang_list: list[str],  # List of languages for each PDF, default is 'ch' (Chinese)
    backend="pipeline",  # The backend for parsing PDF, default is 'pipeline'
    parse_method="auto",  # The method for parsing PDF, default is 'auto'
    p_formula_enable=True,  # Enable formula parsing
    p_table_enable=True,  # Enable table parsing
    server_url=None,  # Server URL for the vlm-sglang-client backend
    f_draw_layout_bbox=True,  # Whether to draw layout bounding boxes
    f_draw_span_bbox=True,  # Whether to draw span bounding boxes
    f_dump_md=True,  # Whether to dump markdown files
    f_dump_middle_json=True,  # Whether to dump middle JSON files
    f_dump_model_output=True,  # Whether to dump model output files
    f_dump_orig_pdf=True,  # Whether to dump original PDF files
    f_dump_content_list=True,  # Whether to dump content list files
    f_make_md_mode=MakeMode.MM_MD,  # The mode for making markdown content, default is MM_MD
    start_page_id=0,  # Start page ID for parsing, default is 0
    end_page_id=None,  # End page ID for parsing, default is None (parse all pages until the end of the document)
):
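    # Pipeline backend: trim each PDF to the requested page range, run the layout/OCR
    # analysis models over all PDFs in one batch, then write the outputs per document.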
    if backend == "pipeline":
        for idx, pdf_bytes in enumerate(pdf_bytes_list):
            new_pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
            pdf_bytes_list[idx] = new_pdf_bytes

        infer_results, all_image_lists, all_pdf_docs, lang_list, ocr_enabled_list = pipeline_doc_analyze(
            pdf_bytes_list,
            p_lang_list,
            parse_method=parse_method,
            formula_enable=p_formula_enable,
            table_enable=p_table_enable,
        )
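
        # Process each document's model output: prepare_env creates the per-document
        # image and markdown output directories, and the raw model output is converted
        # into the middle JSON that every later dump is derived from.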
        for idx, model_list in enumerate(infer_results):
            model_json = copy.deepcopy(model_list)
            pdf_file_name = pdf_file_names[idx]
            local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
            image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)

            images_list = all_image_lists[idx]
            pdf_doc = all_pdf_docs[idx]
            _lang = lang_list[idx]
            _ocr_enable = ocr_enabled_list[idx]
            middle_json = pipeline_result_to_middle_json(
                model_list, images_list, pdf_doc, image_writer, _lang, _ocr_enable, p_formula_enable
            )
            pdf_info = middle_json["pdf_info"]

            pdf_bytes = pdf_bytes_list[idx]
            if f_draw_layout_bbox:
                draw_layout_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_layout.pdf")

            if f_draw_span_bbox:
                draw_span_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_span.pdf")

            if f_dump_orig_pdf:
                md_writer.write(
                    f"{pdf_file_name}_origin.pdf",
                    pdf_bytes,
                )

            if f_dump_md:
                image_dir = str(os.path.basename(local_image_dir))
                md_content_str = pipeline_union_make(pdf_info, f_make_md_mode, image_dir)
                md_writer.write_string(
                    f"{pdf_file_name}.md",
                    md_content_str,
                )

            if f_dump_content_list:
                image_dir = str(os.path.basename(local_image_dir))
                content_list = pipeline_union_make(pdf_info, MakeMode.CONTENT_LIST, image_dir)
                md_writer.write_string(
                    f"{pdf_file_name}_content_list.json",
                    json.dumps(content_list, ensure_ascii=False, indent=4),
                )

            if f_dump_middle_json:
                md_writer.write_string(
                    f"{pdf_file_name}_middle.json",
                    json.dumps(middle_json, ensure_ascii=False, indent=4),
                )

            if f_dump_model_output:
                md_writer.write_string(
                    f"{pdf_file_name}_model.json",
                    json.dumps(model_json, ensure_ascii=False, indent=4),
                )

            logger.info(f"local output dir is {local_md_dir}")
    else:
        if backend.startswith("vlm-"):
            backend = backend[4:]
        f_draw_span_bbox = False
        parse_method = "vlm"

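        # VLM backend: each PDF is analyzed in a single pass by the vision-language model
        # (transformers, sglang-engine, or sglang-client), which returns the middle JSON
        # plus the raw model output (joined into a text dump below).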
        for idx, pdf_bytes in enumerate(pdf_bytes_list):
            pdf_file_name = pdf_file_names[idx]
            pdf_bytes = convert_pdf_bytes_to_bytes_by_pypdfium2(pdf_bytes, start_page_id, end_page_id)
            local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
            image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(local_md_dir)
            middle_json, infer_result = vlm_doc_analyze(pdf_bytes, image_writer=image_writer, backend=backend, server_url=server_url)
            pdf_info = middle_json["pdf_info"]

            if f_draw_layout_bbox:
                draw_layout_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_layout.pdf")

            if f_draw_span_bbox:
                draw_span_bbox(pdf_info, pdf_bytes, local_md_dir, f"{pdf_file_name}_span.pdf")

            if f_dump_orig_pdf:
                md_writer.write(
                    f"{pdf_file_name}_origin.pdf",
                    pdf_bytes,
                )

            if f_dump_md:
                image_dir = str(os.path.basename(local_image_dir))
                md_content_str = vlm_union_make(pdf_info, f_make_md_mode, image_dir)
                md_writer.write_string(
                    f"{pdf_file_name}.md",
                    md_content_str,
                )

            if f_dump_content_list:
                image_dir = str(os.path.basename(local_image_dir))
                content_list = vlm_union_make(pdf_info, MakeMode.CONTENT_LIST, image_dir)
                md_writer.write_string(
                    f"{pdf_file_name}_content_list.json",
                    json.dumps(content_list, ensure_ascii=False, indent=4),
                )

            if f_dump_middle_json:
                md_writer.write_string(
                    f"{pdf_file_name}_middle.json",
                    json.dumps(middle_json, ensure_ascii=False, indent=4),
                )

            if f_dump_model_output:
                model_output = ("\n" + "-" * 50 + "\n").join(infer_result)
                md_writer.write_string(
                    f"{pdf_file_name}_model_output.txt",
                    model_output,
                )

            logger.info(f"local output dir is {local_md_dir}")


def parse_doc(
    path_list: list[Path],
    output_dir,
    lang="ch",
    backend="pipeline",
    method="auto",
    server_url=None,
    start_page_id=0,
    end_page_id=None,
):
  146. """
  147. Parameter description:
  148. path_list: List of document paths to be parsed, can be PDF or image files.
  149. output_dir: Output directory for storing parsing results.
  150. lang: Language option, default is 'ch', optional values include['ch', 'ch_server', 'ch_lite', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka']。
  151. Input the languages in the pdf (if known) to improve OCR accuracy. Optional.
  152. Adapted only for the case where the backend is set to "pipeline"
  153. backend: the backend for parsing pdf:
  154. pipeline: More general.
  155. vlm-transformers: More general.
  156. vlm-sglang-engine: Faster(engine).
  157. vlm-sglang-client: Faster(client).
  158. without method specified, pipeline will be used by default.
  159. method: the method for parsing pdf:
  160. auto: Automatically determine the method based on the file type.
  161. txt: Use text extraction method.
  162. ocr: Use OCR method for image-based PDFs.
  163. Without method specified, 'auto' will be used by default.
  164. Adapted only for the case where the backend is set to "pipeline".
  165. server_url: When the backend is `sglang-client`, you need to specify the server_url, for example:`http://127.0.0.1:30000`
  166. start_page_id: Start page ID for parsing, default is 0
  167. end_page_id: End page ID for parsing, default is None (parse all pages until the end of the document)
  168. """
    try:
        file_name_list = []
        pdf_bytes_list = []
        lang_list = []
        for path in path_list:
            file_name = str(Path(path).stem)
            pdf_bytes = read_fn(path)
            file_name_list.append(file_name)
            pdf_bytes_list.append(pdf_bytes)
            lang_list.append(lang)
        do_parse(
            output_dir=output_dir,
            pdf_file_names=file_name_list,
            pdf_bytes_list=pdf_bytes_list,
            p_lang_list=lang_list,
            backend=backend,
            parse_method=method,
            server_url=server_url,
            start_page_id=start_page_id,
            end_page_id=end_page_id,
        )
    except Exception as e:
        logger.exception(e)


if __name__ == '__main__':
    # args
    __dir__ = os.path.dirname(os.path.abspath(__file__))
    pdf_files_dir = os.path.join(__dir__, "pdfs")
    output_dir = os.path.join(__dir__, "output")
    pdf_suffixes = [".pdf"]
    image_suffixes = [".png", ".jpeg", ".jpg"]

    doc_path_list = []
    for doc_path in Path(pdf_files_dir).glob('*'):
        if doc_path.suffix in pdf_suffixes + image_suffixes:
            doc_path_list.append(doc_path)
  203. """如果您由于网络问题无法下载模型,可以设置环境变量MINERU_MODEL_SOURCE为modelscope使用免代理仓库下载模型"""
    # os.environ['MINERU_MODEL_SOURCE'] = "modelscope"

    """Use pipeline mode if your environment does not support VLM"""
    parse_doc(doc_path_list, output_dir, backend="pipeline")

    """To enable VLM mode, change the backend to 'vlm-xxx'"""
    # parse_doc(doc_path_list, output_dir, backend="vlm-transformers")  # more general
    # parse_doc(doc_path_list, output_dir, backend="vlm-sglang-engine")  # faster (engine)
    # parse_doc(doc_path_list, output_dir, backend="vlm-sglang-client", server_url="http://127.0.0.1:30000")  # faster (client)
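
    # The demo parses every page by default; to restrict parsing to a page range,
    # pass start_page_id/end_page_id (0-based), for example:
    # parse_doc(doc_path_list, output_dir, backend="pipeline", start_page_id=0, end_page_id=9)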