Api Usage
===========

PDF
----

Local File Example
^^^^^^^^^^^^^^^^^^

.. code:: python
    import os

    from magic_pdf.data.data_reader_writer import FileBasedDataWriter, FileBasedDataReader
    from magic_pdf.data.dataset import PymuDocDataset
    from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
    from magic_pdf.config.enums import SupportedPdfParseMethod

    # args
    pdf_file_name = "abc.pdf"  # replace with the real pdf path
    name_without_suff = pdf_file_name.split(".")[0]

    # prepare env
    local_image_dir, local_md_dir = "output/images", "output"
    image_dir = str(os.path.basename(local_image_dir))

    os.makedirs(local_image_dir, exist_ok=True)

    image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
        local_md_dir
    )

    # read bytes
    reader1 = FileBasedDataReader("")
    pdf_bytes = reader1.read(pdf_file_name)  # read the pdf content

    # proc
    ## Create Dataset Instance
    ds = PymuDocDataset(pdf_bytes)

    ## inference
    if ds.classify() == SupportedPdfParseMethod.OCR:
        infer_result = ds.apply(doc_analyze, ocr=True)

        ## pipeline
        pipe_result = infer_result.pipe_ocr_mode(image_writer)
    else:
        infer_result = ds.apply(doc_analyze, ocr=False)

        ## pipeline
        pipe_result = infer_result.pipe_txt_mode(image_writer)

    ### draw model result on each page
    infer_result.draw_model(os.path.join(local_md_dir, f"{name_without_suff}_model.pdf"))

    ### get model inference result
    model_inference_result = infer_result.get_infer_res()

    ### draw layout result on each page
    pipe_result.draw_layout(os.path.join(local_md_dir, f"{name_without_suff}_layout.pdf"))

    ### draw spans result on each page
    pipe_result.draw_span(os.path.join(local_md_dir, f"{name_without_suff}_spans.pdf"))

    ### get markdown content
    md_content = pipe_result.get_markdown(image_dir)

    ### dump markdown
    pipe_result.dump_md(md_writer, f"{name_without_suff}.md", image_dir)

    ### get content list content
    content_list_content = pipe_result.get_content_list(image_dir)

    ### dump content list
    pipe_result.dump_content_list(md_writer, f"{name_without_suff}_content_list.json", image_dir)

    ### get middle json
    middle_json_content = pipe_result.get_middle_json()

    ### dump middle json
    pipe_result.dump_middle_json(md_writer, f'{name_without_suff}_middle.json')
S3 File Example
^^^^^^^^^^^^^^^^

.. code:: python
    import os

    from magic_pdf.data.data_reader_writer import S3DataReader, S3DataWriter
    from magic_pdf.data.dataset import PymuDocDataset
    from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
    from magic_pdf.config.enums import SupportedPdfParseMethod

    bucket_name = "{Your S3 Bucket Name}"  # replace with real bucket name
    ak = "{Your S3 access key}"  # replace with real s3 access key
    sk = "{Your S3 secret key}"  # replace with real s3 secret key
    endpoint_url = "{Your S3 endpoint_url}"  # replace with real s3 endpoint_url

    reader = S3DataReader('unittest/tmp/', bucket_name, ak, sk, endpoint_url)  # replace `unittest/tmp` with the real s3 prefix
    writer = S3DataWriter('unittest/tmp', bucket_name, ak, sk, endpoint_url)
    image_writer = S3DataWriter('unittest/tmp/images', bucket_name, ak, sk, endpoint_url)
    md_writer = S3DataWriter('unittest/tmp', bucket_name, ak, sk, endpoint_url)

    local_image_dir, local_md_dir = "output/images", "output"
    image_dir = str(os.path.basename(local_image_dir))

    # args
    pdf_file_name = (
        f"s3://{bucket_name}/unittest/tmp/bug5-11.pdf"  # replace with the real s3 path
    )

    # prepare env
    local_dir = "output"
    name_without_suff = os.path.basename(pdf_file_name).split(".")[0]

    # read bytes
    pdf_bytes = reader.read(pdf_file_name)  # read the pdf content

    # proc
    ## Create Dataset Instance
    ds = PymuDocDataset(pdf_bytes)

    ## inference
    if ds.classify() == SupportedPdfParseMethod.OCR:
        infer_result = ds.apply(doc_analyze, ocr=True)

        ## pipeline
        pipe_result = infer_result.pipe_ocr_mode(image_writer)
    else:
        infer_result = ds.apply(doc_analyze, ocr=False)

        ## pipeline
        pipe_result = infer_result.pipe_txt_mode(image_writer)

    ### draw model result on each page
    infer_result.draw_model(os.path.join(local_md_dir, f"{name_without_suff}_model.pdf"))

    ### get model inference result
    model_inference_result = infer_result.get_infer_res()

    ### draw layout result on each page
    pipe_result.draw_layout(os.path.join(local_md_dir, f"{name_without_suff}_layout.pdf"))

    ### draw spans result on each page
    pipe_result.draw_span(os.path.join(local_md_dir, f"{name_without_suff}_spans.pdf"))

    ### dump markdown
    pipe_result.dump_md(md_writer, f"{name_without_suff}.md", image_dir)

    ### dump content list
    pipe_result.dump_content_list(md_writer, f"{name_without_suff}_content_list.json", image_dir)

    ### get markdown content
    md_content = pipe_result.get_markdown(image_dir)

    ### get content list content
    content_list_content = pipe_result.get_content_list(image_dir)

    ### get middle json
    middle_json_content = pipe_result.get_middle_json()

    ### dump middle json
    pipe_result.dump_middle_json(md_writer, f'{name_without_suff}_middle.json')
MS-Office
----------

.. code:: python
    import os

    from magic_pdf.data.data_reader_writer import FileBasedDataWriter, FileBasedDataReader
    from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
    from magic_pdf.data.read_api import read_local_office

    # prepare env
    local_image_dir, local_md_dir = "output/images", "output"
    image_dir = str(os.path.basename(local_image_dir))

    os.makedirs(local_image_dir, exist_ok=True)

    image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
        local_md_dir
    )

    # proc
    ## Create Dataset Instance
    input_file = "some_ppt.ppt"  # replace with real ms-office file
    input_file_name = input_file.split(".")[0]
    ds = read_local_office(input_file)[0]

    ds.apply(doc_analyze, ocr=True).pipe_txt_mode(image_writer).dump_md(
        md_writer, f"{input_file_name}.md", image_dir
    )
This code snippet can be used to manipulate **ppt**, **pptx**, **doc**, **docx** files.

Image
---------

Single Image File
^^^^^^^^^^^^^^^^^^^

.. code:: python
    import os

    from magic_pdf.data.data_reader_writer import FileBasedDataWriter
    from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
    from magic_pdf.data.read_api import read_local_images

    # prepare env
    local_image_dir, local_md_dir = "output/images", "output"
    image_dir = str(os.path.basename(local_image_dir))

    os.makedirs(local_image_dir, exist_ok=True)

    image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
        local_md_dir
    )

    # proc
    ## Create Dataset Instance
    input_file = "some_image.jpg"  # replace with real image file
    input_file_name = input_file.split(".")[0]
    ds = read_local_images(input_file)[0]

    ds.apply(doc_analyze, ocr=True).pipe_ocr_mode(image_writer).dump_md(
        md_writer, f"{input_file_name}.md", image_dir
    )
Directory That Contains Images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. code:: python
    import os

    from magic_pdf.data.data_reader_writer import FileBasedDataWriter
    from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
    from magic_pdf.data.read_api import read_local_images

    # prepare env
    local_image_dir, local_md_dir = "output/images", "output"
    image_dir = str(os.path.basename(local_image_dir))

    os.makedirs(local_image_dir, exist_ok=True)

    image_writer, md_writer = FileBasedDataWriter(local_image_dir), FileBasedDataWriter(
        local_md_dir
    )

    # proc
    ## Create Dataset Instance
    input_directory = "some_image_dir/"  # replace with real directory that contains images
    dss = read_local_images(input_directory, suffixes=['.png', '.jpg'])

    count = 0
    for ds in dss:
        ds.apply(doc_analyze, ocr=True).pipe_ocr_mode(image_writer).dump_md(
            md_writer, f"{count}.md", image_dir
        )
        count += 1
Check :doc:`../data/data_reader_writer` for more [reader | writer] examples, and check :doc:`../../api/pipe_operators` or :doc:`../../api/model_operators` for API details.