# doc_analyze_by_custom_model.py

import concurrent.futures as fut
import multiprocessing as mp
import os
import time

import numpy as np
import torch

os.environ['FLAGS_npu_jit_compile'] = '0'  # disable Paddle's JIT compilation
os.environ['FLAGS_use_stride_kernel'] = '0'
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # allow MPS ops to fall back to CPU
os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # stop albumentations from checking for updates

from loguru import logger

from magic_pdf.model.sub_modules.model_utils import get_vram

try:
    import torchtext

    # Compare parsed version components rather than raw strings, so that
    # e.g. '0.9.0' does not sort above '0.18.0'.
    if tuple(int(p) for p in torchtext.__version__.split('.')[:2]) >= (0, 18):
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL
# from magic_pdf.operators.models import InferenceResult
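

# ModelSingleton caches one fully initialized model per option tuple, so
# repeated calls with identical (ocr, show_log, lang, layout_model,
# formula_enable, table_enable) arguments reuse the already-loaded weights
# instead of paying the model-initialization cost again.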
class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        ocr: bool,
        show_log: bool,
        lang=None,
        layout_model=None,
        formula_enable=None,
        table_enable=None,
    ):
        key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
        if key not in self._models:
            self._models[key] = custom_model_init(
                ocr=ocr,
                show_log=show_log,
                lang=lang,
                layout_model=layout_model,
                formula_enable=formula_enable,
                table_enable=table_enable,
            )
        return self._models[key]


def custom_model_init(
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    model = None
    if model_config.__model_mode__ == 'lite':
        logger.warning(
            'The Lite mode is provided for developers to conduct testing only, and the output quality is '
            'not guaranteed to be reliable.'
        )
        model = MODEL.Paddle
    elif model_config.__model_mode__ == 'full':
        model = MODEL.PEK

    if model_config.__use_inside_model__:
        model_init_start = time.time()
        if model == MODEL.Paddle:
            from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
        elif model == MODEL.PEK:
            from magic_pdf.model.pdf_extract_kit import CustomPEKModel
            # read the model directory and device from the config file
            local_models_dir = get_local_models_dir()
            device = get_device()

            # explicit keyword arguments override the values read from the config
            layout_config = get_layout_config()
            if layout_model is not None:
                layout_config['model'] = layout_model

            formula_config = get_formula_config()
            if formula_enable is not None:
                formula_config['enable'] = formula_enable

            table_config = get_table_recog_config()
            if table_enable is not None:
                table_config['enable'] = table_enable

            model_input = {
                'ocr': ocr,
                'show_log': show_log,
                'models_dir': local_models_dir,
                'device': device,
                'table_config': table_config,
                'layout_config': layout_config,
                'formula_config': formula_config,
                'lang': lang,
            }

            custom_model = CustomPEKModel(**model_input)
        else:
            logger.error('Model name not allowed!')
            exit(1)
        model_init_cost = time.time() - model_init_start
        logger.info(f'model init cost: {model_init_cost}')
    else:
        logger.error('use_inside_model is False, using the inside model is not allowed')
        exit(1)

    return custom_model
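

# doc_analyze flow: render each requested page of the dataset to an image,
# group the images into inference batches, run the (possibly batched) model,
# then reassemble the per-page results into the model_json structure that
# InferenceResult expects.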
def doc_analyze(
    dataset: Dataset,
    ocr: bool = False,
    show_log: bool = False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    # normalize end_page_id: None or a negative value means "through the last page"
    end_page_id = (
        end_page_id
        if end_page_id is not None and end_page_id >= 0
        else len(dataset) - 1
    )

    MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
    images = []
    page_wh_list = []
    for index in range(len(dataset)):
        if start_page_id <= index <= end_page_id:
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            images.append(img_dict['img'])
            page_wh_list.append((img_dict['width'], img_dict['height']))
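
    # Chunk into fixed-size batches only once there are at least
    # MIN_BATCH_INFERENCE_SIZE pages; e.g. with the default threshold of 100,
    # 230 pages are split into batches of 100, 100 and 30, while 80 pages are
    # processed as one batch.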
    if len(images) >= MIN_BATCH_INFERENCE_SIZE:
        batch_size = MIN_BATCH_INFERENCE_SIZE
        batch_images = [images[i:i + batch_size] for i in range(0, len(images), batch_size)]
    else:
        batch_images = [images]

    results = []
    for sn, batch_image in enumerate(batch_images):
        _, result = may_batch_image_analyze(
            batch_image, sn, ocr, show_log, lang, layout_model, formula_enable, table_enable
        )
        results.extend(result)
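
    # Reassemble per-page results; pages outside [start_page_id, end_page_id]
    # get empty layout_dets and a zero-sized page_info entry.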
    model_json = []
    for index in range(len(dataset)):
        if start_page_id <= index <= end_page_id:
            result = results.pop(0)
            page_width, page_height = page_wh_list.pop(0)
        else:
            result = []
            page_height = 0
            page_width = 0

        page_info = {'page_no': index, 'width': page_width, 'height': page_height}
        page_dict = {'layout_dets': result, 'page_info': page_info}
        model_json.append(page_dict)
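
    # Imported here rather than at module top (note the commented-out import
    # above), presumably to avoid a circular import.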
    from magic_pdf.operators.models import InferenceResult
    return InferenceResult(model_json, dataset)
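

# Illustrative usage (a sketch, not part of the original module). It assumes
# PymuDocDataset is the concrete Dataset implementation exposed by
# magic_pdf.data.dataset, and 'example.pdf' is a hypothetical input path:
#
#     from magic_pdf.data.dataset import PymuDocDataset
#
#     with open('example.pdf', 'rb') as f:
#         ds = PymuDocDataset(f.read())
#     infer_result = doc_analyze(ds, ocr=True)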


def batch_doc_analyze(
    datasets: list[Dataset],
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
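    # Flatten the pages of every dataset into one image stream so batch
    # inference can cross document boundaries, then split the results back out
    # per dataset (in order) at the end.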
    MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
    images = []
    page_wh_list = []
    for dataset in datasets:
        for index in range(len(dataset)):
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            images.append(img_dict['img'])
            page_wh_list.append((img_dict['width'], img_dict['height']))

    if len(images) >= MIN_BATCH_INFERENCE_SIZE:
        batch_size = MIN_BATCH_INFERENCE_SIZE
        batch_images = [images[i:i + batch_size] for i in range(0, len(images), batch_size)]
    else:
        batch_images = [images]

    results = []
    for sn, batch_image in enumerate(batch_images):
        _, result = may_batch_image_analyze(
            batch_image, sn, ocr, show_log, lang, layout_model, formula_enable, table_enable
        )
        results.extend(result)

    infer_results = []
    from magic_pdf.operators.models import InferenceResult
    for index in range(len(datasets)):
        dataset = datasets[index]
        model_json = []
        for i in range(len(dataset)):
            result = results.pop(0)
            page_width, page_height = page_wh_list.pop(0)
            page_info = {'page_no': i, 'width': page_width, 'height': page_height}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
        infer_results.append(InferenceResult(model_json, dataset))
    return infer_results
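
# As with doc_analyze above, a hypothetical call (assuming PymuDocDataset):
#
#     infer_results = batch_doc_analyze([ds_a, ds_b], ocr=True)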


def may_batch_image_analyze(
    images: list[np.ndarray],
    idx: int,
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)

    # disable Paddle's signal handling
    import paddle
    paddle.disable_signal_handler()

    from magic_pdf.model.batch_analyze import BatchAnalyze

    model_manager = ModelSingleton()
    custom_model = model_manager.get_model(
        ocr, show_log, lang, layout_model, formula_enable, table_enable
    )

    batch_analyze = False
    batch_ratio = 1
    device = get_device()

    npu_support = False
    if str(device).startswith('npu'):
        import torch_npu
        if torch_npu.npu.is_available():
            npu_support = True
            torch.npu.set_compile_mode(jit_compile=False)
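
    # Scale the inference batch size with available VRAM: >=20 GB -> 16x,
    # >=15 GB -> 8x, >=10 GB -> 4x, otherwise 2x; below 8 GB (or on CPU/MPS)
    # fall back to sequential single-image inference.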
    if (torch.cuda.is_available() and device != 'cpu') or npu_support:
        gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
        if gpu_memory >= 8:
            if gpu_memory >= 20:
                batch_ratio = 16
            elif gpu_memory >= 15:
                batch_ratio = 8
            elif gpu_memory >= 10:
                batch_ratio = 4
            else:
                batch_ratio = 2
            logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
            batch_analyze = True

    doc_analyze_start = time.time()

    if batch_analyze:
  241. """# batch analyze
  242. images = []
  243. page_wh_list = []
  244. for index in range(len(dataset)):
  245. if start_page_id <= index <= end_page_id:
  246. page_data = dataset.get_page(index)
  247. img_dict = page_data.get_image()
  248. images.append(img_dict['img'])
  249. page_wh_list.append((img_dict['width'], img_dict['height']))
  250. """
        # batch analyze
        batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
        results = batch_model(images)
  253. """
  254. for index in range(len(dataset)):
  255. if start_page_id <= index <= end_page_id:
  256. result = analyze_result.pop(0)
  257. page_width, page_height = page_wh_list.pop(0)
  258. else:
  259. result = []
  260. page_height = 0
  261. page_width = 0
  262. page_info = {'page_no': index, 'width': page_width, 'height': page_height}
  263. page_dict = {'layout_dets': result, 'page_info': page_info}
  264. model_json.append(page_dict)
  265. """
    else:
        # single analyze
  268. """
  269. for index in range(len(dataset)):
  270. page_data = dataset.get_page(index)
  271. img_dict = page_data.get_image()
  272. img = img_dict['img']
  273. page_width = img_dict['width']
  274. page_height = img_dict['height']
  275. if start_page_id <= index <= end_page_id:
  276. page_start = time.time()
  277. result = custom_model(img)
  278. logger.info(f'-----page_id : {index}, page total time: {round(time.time() - page_start, 2)}-----')
  279. else:
  280. result = []
  281. page_info = {'page_no': index, 'width': page_width, 'height': page_height}
  282. page_dict = {'layout_dets': result, 'page_info': page_info}
  283. model_json.append(page_dict)
  284. """
        results = []
        for img_idx, img in enumerate(images):
            inference_start = time.time()
            result = custom_model(img)
            logger.info(
                f'-----image index : {img_idx}, '
                f'image inference total time: {round(time.time() - inference_start, 2)}-----'
            )
            results.append(result)

    gc_start = time.time()
    clean_memory(get_device())
    gc_time = round(time.time() - gc_start, 2)
    logger.info(f'gc time: {gc_time}')

    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
    # avoid a ZeroDivisionError when the rounded elapsed time is 0.0
    doc_analyze_speed = round(len(images) / doc_analyze_time, 2) if doc_analyze_time > 0 else 0.0
    logger.info(
        f'doc analyze time: {doc_analyze_time},'
        f' speed: {doc_analyze_speed} pages/second'
    )
    return idx, results