# doc_analyze_by_custom_model.py

import concurrent.futures as fut
import multiprocessing as mp
import os
import time

import numpy as np
import torch

os.environ['FLAGS_npu_jit_compile'] = '0'  # disable Paddle's JIT compilation
os.environ['FLAGS_use_stride_kernel'] = '0'
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # let MPS fall back to CPU kernels
os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # stop albumentations from checking for updates

from loguru import logger

from magic_pdf.model.sub_modules.model_utils import get_vram
from magic_pdf.config.enums import SupportedPdfParseMethod
import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL

# from magic_pdf.operators.models import InferenceResult


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        ocr: bool,
        show_log: bool,
        lang=None,
        layout_model=None,
        formula_enable=None,
        table_enable=None,
    ):
        key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
        if key not in self._models:
            self._models[key] = custom_model_init(
                ocr=ocr,
                show_log=show_log,
                lang=lang,
                layout_model=layout_model,
                formula_enable=formula_enable,
                table_enable=table_enable,
            )
        return self._models[key]
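
# Usage sketch (illustrative only): ModelSingleton caches one initialized model
# per argument tuple, so repeated calls with the same arguments reuse the same
# (expensive) model instance instead of re-initializing it:
#
#   manager = ModelSingleton()
#   model_a = manager.get_model(ocr=True, show_log=False, lang='en')
#   model_b = manager.get_model(ocr=True, show_log=False, lang='en')
#   assert model_a is model_b  # same cached instance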


def custom_model_init(
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    model = None
    if model_config.__model_mode__ == 'lite':
        logger.warning(
            'The Lite mode is provided for developers to conduct testing only, and the output quality is '
            'not guaranteed to be reliable.'
        )
        model = MODEL.Paddle
    elif model_config.__model_mode__ == 'full':
        model = MODEL.PEK

    if model_config.__use_inside_model__:
        model_init_start = time.time()
        if model == MODEL.Paddle:
            from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
        elif model == MODEL.PEK:
            from magic_pdf.model.pdf_extract_kit import CustomPEKModel
            # read models-dir and device from the config file
            local_models_dir = get_local_models_dir()
            device = get_device()

            layout_config = get_layout_config()
            if layout_model is not None:
                layout_config['model'] = layout_model

            formula_config = get_formula_config()
            if formula_enable is not None:
                formula_config['enable'] = formula_enable

            table_config = get_table_recog_config()
            if table_enable is not None:
                table_config['enable'] = table_enable

            model_input = {
                'ocr': ocr,
                'show_log': show_log,
                'models_dir': local_models_dir,
                'device': device,
                'table_config': table_config,
                'layout_config': layout_config,
                'formula_config': formula_config,
                'lang': lang,
            }
            custom_model = CustomPEKModel(**model_input)
        else:
            logger.error('Unsupported model name, cannot initialize!')
            exit(1)
        model_init_cost = time.time() - model_init_start
        logger.info(f'model init cost: {model_init_cost}')
    else:
        logger.error('use_inside_model is False, using the inside model is not allowed')
        exit(1)

    return custom_model
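
# Override sketch (the value 'doclayout_yolo' is only an example name): any
# non-None layout_model/formula_enable/table_enable argument overrides what the
# config file provides, while models_dir and device still come from the config:
#
#   model = custom_model_init(ocr=True, layout_model='doclayout_yolo',
#                             formula_enable=False, table_enable=True)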


def doc_analyze(
    dataset: Dataset,
    ocr: bool = False,
    show_log: bool = False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    end_page_id = (
        end_page_id
        if end_page_id is not None and end_page_id >= 0
        else len(dataset) - 1
    )
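    # e.g. end_page_id=None (or any negative value) selects through the last
    # page, index len(dataset) - 1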
    MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 200))
    images = []
    page_wh_list = []
    for index in range(len(dataset)):
        if start_page_id <= index <= end_page_id:
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            images.append(img_dict['img'])
            page_wh_list.append((img_dict['width'], img_dict['height']))

    # iterate over the trimmed image list, not the full dataset: pages outside
    # [start_page_id, end_page_id] were skipped above
    if lang is None or lang == 'auto':
        images_with_extra_info = [(images[index], ocr, dataset._lang) for index in range(len(images))]
    else:
        images_with_extra_info = [(images[index], ocr, lang) for index in range(len(images))]

    if len(images) >= MIN_BATCH_INFERENCE_SIZE:
        batch_size = MIN_BATCH_INFERENCE_SIZE
        batch_images = [images_with_extra_info[i:i + batch_size] for i in range(0, len(images_with_extra_info), batch_size)]
    else:
        batch_images = [images_with_extra_info]
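    # Chunking example (illustrative numbers): with the default
    # MINERU_MIN_BATCH_INFERENCE_SIZE of 200, a 450-page document is split into
    # batches of 200, 200 and 50 pages; shorter documents run as one batch.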
    results = []
    for sn, batch_image in enumerate(batch_images):
        _, result = may_batch_image_analyze(batch_image, sn, ocr, show_log, layout_model, formula_enable, table_enable)
        results.extend(result)

    model_json = []
    for index in range(len(dataset)):
        if start_page_id <= index <= end_page_id:
            result = results.pop(0)
            page_width, page_height = page_wh_list.pop(0)
        else:
            result = []
            page_height = 0
            page_width = 0
        page_info = {'page_no': index, 'width': page_width, 'height': page_height}
        page_dict = {'layout_dets': result, 'page_info': page_info}
        model_json.append(page_dict)

    from magic_pdf.operators.models import InferenceResult
    return InferenceResult(model_json, dataset)
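
# Usage sketch ('demo.pdf' is a placeholder; PymuDocDataset is the Dataset
# implementation shipped in magic_pdf.data.dataset):
#
#   from magic_pdf.data.dataset import PymuDocDataset
#   with open('demo.pdf', 'rb') as f:
#       ds = PymuDocDataset(f.read())
#   infer_result = doc_analyze(ds, ocr=True, lang='en')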


def batch_doc_analyze(
    datasets: list[Dataset],
    parse_method: str,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
    batch_size = MIN_BATCH_INFERENCE_SIZE
    images = []
    page_wh_list = []

    images_with_extra_info = []
    for dataset in datasets:
        for index in range(len(dataset)):
            if lang is None or lang == 'auto':
                _lang = dataset._lang
            else:
                _lang = lang

            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            images.append(img_dict['img'])
            page_wh_list.append((img_dict['width'], img_dict['height']))
            if parse_method == 'auto':
                images_with_extra_info.append((images[-1], dataset.classify() == SupportedPdfParseMethod.OCR, _lang))
            else:
                images_with_extra_info.append((images[-1], parse_method == 'ocr', _lang))
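            # parse_method='auto' defers to dataset.classify(); 'ocr' forces the
            # OCR flag on for every page; any other value (e.g. 'txt') leaves it off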
    batch_images = [images_with_extra_info[i:i + batch_size] for i in range(0, len(images_with_extra_info), batch_size)]
    results = []
    for sn, batch_image in enumerate(batch_images):
        _, result = may_batch_image_analyze(batch_image, sn, True, show_log, layout_model, formula_enable, table_enable)
        results.extend(result)

    infer_results = []
    from magic_pdf.operators.models import InferenceResult
    for index in range(len(datasets)):
        dataset = datasets[index]
        model_json = []
        for i in range(len(dataset)):
            result = results.pop(0)
            page_width, page_height = page_wh_list.pop(0)
            page_info = {'page_no': i, 'width': page_width, 'height': page_height}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
        infer_results.append(InferenceResult(model_json, dataset))
    return infer_results
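
# Usage sketch (file names are placeholders): pages from all datasets are pooled
# into shared inference batches, and one InferenceResult comes back per dataset,
# in input order:
#
#   datasets = [PymuDocDataset(open(p, 'rb').read()) for p in ('a.pdf', 'b.pdf')]
#   results = batch_doc_analyze(datasets, parse_method='auto')
#   assert len(results) == len(datasets)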


def may_batch_image_analyze(
        images_with_extra_info: list[tuple[np.ndarray, bool, str]],
        idx: int,
        ocr: bool,
        show_log: bool = False,
        layout_model=None,
        formula_enable=None,
        table_enable=None):
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)

    # disable Paddle's signal handler
    import paddle
    paddle.disable_signal_handler()

    from magic_pdf.model.batch_analyze import BatchAnalyze

    model_manager = ModelSingleton()
    images = [image for image, _, _ in images_with_extra_info]
    batch_analyze = False
    batch_ratio = 1
    device = get_device()

    if str(device).startswith('npu'):
        import torch_npu
        if torch_npu.npu.is_available():
            torch.npu.set_compile_mode(jit_compile=False)

    if str(device).startswith('npu') or str(device).startswith('cuda'):
        gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
        if gpu_memory is not None:
            if gpu_memory >= 16:
                batch_ratio = 16
            elif gpu_memory >= 12:
                batch_ratio = 8
            elif gpu_memory >= 8:
                batch_ratio = 4
            elif gpu_memory >= 6:
                batch_ratio = 2
            else:
                batch_ratio = 1
            logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
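            # Example (illustrative numbers): a 24 GB card maps to batch_ratio 16
            # and a 10 GB card to 4; exporting VIRTUAL_VRAM_SIZE=6 forces
            # batch_ratio 2 regardless of what get_vram() reports.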
            # batch_analyze = True
    elif str(device).startswith('mps'):
        # batch_analyze = True
        pass

    doc_analyze_start = time.time()

    batch_model = BatchAnalyze(model_manager, batch_ratio, show_log, layout_model, formula_enable, table_enable)
    results = batch_model(images_with_extra_info)

    gc_start = time.time()
    clean_memory(get_device())
    gc_time = round(time.time() - gc_start, 2)
    logger.info(f'gc time: {gc_time}')

    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
    doc_analyze_speed = round(len(images) / doc_analyze_time, 2)
    logger.info(
        f'doc analyze time: {doc_analyze_time},'
        f' speed: {doc_analyze_speed} pages/second'
    )
    return (idx, results)
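
# Note on the return shape: (idx, results) preserves batch order if callers fan
# batches out to workers (the commented-out CUDA_VISIBLE_DEVICES line above
# hints at a one-process-per-GPU setup); the sequential callers in this module
# simply discard idx.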