# doc_analyze_by_custom_model.py
import concurrent.futures as fut
import multiprocessing as mp
import os
import time
import numpy as np
import torch

# Environment flags are set before the imports below so the libraries that
# read them at import time pick them up.
os.environ['FLAGS_npu_jit_compile'] = '0'  # disable paddle's JIT compilation
os.environ['FLAGS_use_stride_kernel'] = '0'
os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'  # allow MPS ops to fall back
os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # stop albumentations update checks

from loguru import logger

from magic_pdf.model.sub_modules.model_utils import get_vram
import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL
# from magic_pdf.operators.models import InferenceResult
  22. class ModelSingleton:
  23. _instance = None
  24. _models = {}
  25. def __new__(cls, *args, **kwargs):
  26. if cls._instance is None:
  27. cls._instance = super().__new__(cls)
  28. return cls._instance
  29. def get_model(
  30. self,
  31. ocr: bool,
  32. show_log: bool,
  33. lang=None,
  34. layout_model=None,
  35. formula_enable=None,
  36. table_enable=None,
  37. ):
  38. key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
  39. if key not in self._models:
  40. self._models[key] = custom_model_init(
  41. ocr=ocr,
  42. show_log=show_log,
  43. lang=lang,
  44. layout_model=layout_model,
  45. formula_enable=formula_enable,
  46. table_enable=table_enable,
  47. )
  48. return self._models[key]
  49. def custom_model_init(
  50. ocr: bool = False,
  51. show_log: bool = False,
  52. lang=None,
  53. layout_model=None,
  54. formula_enable=None,
  55. table_enable=None,
  56. ):
  57. model = None
  58. if model_config.__model_mode__ == 'lite':
  59. logger.warning(
  60. 'The Lite mode is provided for developers to conduct testing only, and the output quality is '
  61. 'not guaranteed to be reliable.'
  62. )
  63. model = MODEL.Paddle
  64. elif model_config.__model_mode__ == 'full':
  65. model = MODEL.PEK
  66. if model_config.__use_inside_model__:
  67. model_init_start = time.time()
  68. if model == MODEL.Paddle:
  69. from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
  70. custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
  71. elif model == MODEL.PEK:
  72. from magic_pdf.model.pdf_extract_kit import CustomPEKModel
  73. # 从配置文件读取model-dir和device
  74. local_models_dir = get_local_models_dir()
  75. device = get_device()
  76. layout_config = get_layout_config()
  77. if layout_model is not None:
  78. layout_config['model'] = layout_model
  79. formula_config = get_formula_config()
  80. if formula_enable is not None:
  81. formula_config['enable'] = formula_enable
  82. table_config = get_table_recog_config()
  83. if table_enable is not None:
  84. table_config['enable'] = table_enable
  85. model_input = {
  86. 'ocr': ocr,
  87. 'show_log': show_log,
  88. 'models_dir': local_models_dir,
  89. 'device': device,
  90. 'table_config': table_config,
  91. 'layout_config': layout_config,
  92. 'formula_config': formula_config,
  93. 'lang': lang,
  94. }
  95. custom_model = CustomPEKModel(**model_input)
  96. else:
  97. logger.error('Not allow model_name!')
  98. exit(1)
  99. model_init_cost = time.time() - model_init_start
  100. logger.info(f'model init cost: {model_init_cost}')
  101. else:
  102. logger.error('use_inside_model is False, not allow to use inside model')
  103. exit(1)
  104. return custom_model
  105. def doc_analyze(
  106. dataset: Dataset,
  107. ocr: bool = False,
  108. show_log: bool = False,
  109. start_page_id=0,
  110. end_page_id=None,
  111. lang=None,
  112. layout_model=None,
  113. formula_enable=None,
  114. table_enable=None,
  115. ):
  116. end_page_id = (
  117. end_page_id
  118. if end_page_id is not None and end_page_id >= 0
  119. else len(dataset) - 1
  120. )
  121. MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
  122. images = []
  123. page_wh_list = []
  124. for index in range(len(dataset)):
  125. if start_page_id <= index <= end_page_id:
  126. page_data = dataset.get_page(index)
  127. img_dict = page_data.get_image()
  128. images.append(img_dict['img'])
  129. page_wh_list.append((img_dict['width'], img_dict['height']))
  130. if len(images) >= MIN_BATCH_INFERENCE_SIZE:
  131. batch_size = MIN_BATCH_INFERENCE_SIZE
  132. batch_images = [images[i:i+batch_size] for i in range(0, len(images), batch_size)]
  133. else:
  134. batch_images = [images]
  135. results = []
  136. for sn, batch_image in enumerate(batch_images):
  137. _, result = may_batch_image_analyze(batch_image, sn, ocr, show_log, lang, layout_model, formula_enable, table_enable)
  138. results.extend(result)
  139. model_json = []
  140. for index in range(len(dataset)):
  141. if start_page_id <= index <= end_page_id:
  142. result = results.pop(0)
  143. page_width, page_height = page_wh_list.pop(0)
  144. else:
  145. result = []
  146. page_height = 0
  147. page_width = 0
  148. page_info = {'page_no': index, 'width': page_width, 'height': page_height}
  149. page_dict = {'layout_dets': result, 'page_info': page_info}
  150. model_json.append(page_dict)
  151. from magic_pdf.operators.models import InferenceResult
  152. return InferenceResult(model_json, dataset)
def batch_doc_analyze(
    datasets: list[Dataset],
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    """Analyze several datasets at once, grouping pages by language for batching.

    All pages of all datasets are flattened into one image list, grouped by
    detected/requested language, analyzed per language group in chunks of at
    most MINERU_MIN_BATCH_INFERENCE_SIZE pages, then restored to the original
    page order and split back into one InferenceResult per dataset.
    """
    MIN_BATCH_INFERENCE_SIZE = int(os.environ.get('MINERU_MIN_BATCH_INFERENCE_SIZE', 100))
    batch_size = MIN_BATCH_INFERENCE_SIZE
    images = []         # flattened page images, in dataset/page order
    page_wh_list = []   # (width, height) per image, same order as `images`
    lang_list = []      # language per image, same order as `images`
    lang_s = set()      # distinct languages seen
    for dataset in datasets:
        for index in range(len(dataset)):
            # 'auto'/None defers to the dataset's own language attribute.
            if lang is None or lang == 'auto':
                lang_list.append(dataset._lang)
            else:
                lang_list.append(lang)
            lang_s.add(lang_list[-1])
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            images.append(img_dict['img'])
            page_wh_list.append((img_dict['width'], img_dict['height']))
    # Build per-language batches. img_idx_list records, in batch submission
    # order, the original flat index of every image — it is the key used later
    # to undo the language grouping.
    batch_images = []
    img_idx_list = []
    for t_lang in lang_s:
        tmp_img_idx_list = []
        for i, _lang in enumerate(lang_list):
            if _lang == t_lang:
                tmp_img_idx_list.append(i)
        img_idx_list.extend(tmp_img_idx_list)
        if batch_size >= len(tmp_img_idx_list):
            batch_images.append((t_lang, [images[j] for j in tmp_img_idx_list]))
        else:
            # Language group larger than one batch: split into fixed-size slices.
            slices = [tmp_img_idx_list[k:k+batch_size] for k in range(0, len(tmp_img_idx_list), batch_size)]
            for arr in slices:
                batch_images.append((t_lang, [images[j] for j in arr]))
    # Run inference batch by batch; unorder_results[i] corresponds to the
    # image whose original flat index is img_idx_list[i].
    unorder_results = []
    for sn, (_lang, batch_image) in enumerate(batch_images):
        _, result = may_batch_image_analyze(batch_image, sn, ocr, show_log, _lang, layout_model, formula_enable, table_enable)
        unorder_results.extend(result)
    # Scatter back into original page order.
    results = [None] * len(img_idx_list)
    for i, idx in enumerate(img_idx_list):
        results[idx] = unorder_results[i]
    # Re-split the flat, ordered result list into one InferenceResult per dataset.
    infer_results = []
    from magic_pdf.operators.models import InferenceResult
    for index in range(len(datasets)):
        dataset = datasets[index]
        model_json = []
        for i in range(len(dataset)):
            result = results.pop(0)
            page_width, page_height = page_wh_list.pop(0)
            page_info = {'page_no': i, 'width': page_width, 'height': page_height}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
        infer_results.append(InferenceResult(model_json, dataset))
    return infer_results
  213. def may_batch_image_analyze(
  214. images: list[np.ndarray],
  215. idx: int,
  216. ocr: bool = False,
  217. show_log: bool = False,
  218. lang=None,
  219. layout_model=None,
  220. formula_enable=None,
  221. table_enable=None):
  222. # os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)
  223. # 关闭paddle的信号处理
  224. import paddle
  225. paddle.disable_signal_handler()
  226. from magic_pdf.model.batch_analyze import BatchAnalyze
  227. model_manager = ModelSingleton()
  228. custom_model = model_manager.get_model(
  229. ocr, show_log, lang, layout_model, formula_enable, table_enable
  230. )
  231. batch_analyze = False
  232. batch_ratio = 1
  233. device = get_device()
  234. if str(device).startswith('npu'):
  235. import torch_npu
  236. if torch_npu.npu.is_available():
  237. torch.npu.set_compile_mode(jit_compile=False)
  238. if str(device).startswith('npu') or str(device).startswith('cuda'):
  239. gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
  240. if gpu_memory is not None:
  241. if gpu_memory >= 16:
  242. batch_ratio = 16
  243. elif gpu_memory >= 12:
  244. batch_ratio = 8
  245. elif gpu_memory >= 8:
  246. batch_ratio = 4
  247. elif gpu_memory >= 6:
  248. batch_ratio = 2
  249. else:
  250. batch_ratio = 1
  251. logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
  252. batch_analyze = True
  253. elif str(device).startswith('mps'):
  254. batch_analyze = True
  255. doc_analyze_start = time.time()
  256. if batch_analyze:
  257. """# batch analyze
  258. images = []
  259. page_wh_list = []
  260. for index in range(len(dataset)):
  261. if start_page_id <= index <= end_page_id:
  262. page_data = dataset.get_page(index)
  263. img_dict = page_data.get_image()
  264. images.append(img_dict['img'])
  265. page_wh_list.append((img_dict['width'], img_dict['height']))
  266. """
  267. batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
  268. results = batch_model(images)
  269. """
  270. for index in range(len(dataset)):
  271. if start_page_id <= index <= end_page_id:
  272. result = analyze_result.pop(0)
  273. page_width, page_height = page_wh_list.pop(0)
  274. else:
  275. result = []
  276. page_height = 0
  277. page_width = 0
  278. page_info = {'page_no': index, 'width': page_width, 'height': page_height}
  279. page_dict = {'layout_dets': result, 'page_info': page_info}
  280. model_json.append(page_dict)
  281. """
  282. else:
  283. # single analyze
  284. """
  285. for index in range(len(dataset)):
  286. page_data = dataset.get_page(index)
  287. img_dict = page_data.get_image()
  288. img = img_dict['img']
  289. page_width = img_dict['width']
  290. page_height = img_dict['height']
  291. if start_page_id <= index <= end_page_id:
  292. page_start = time.time()
  293. result = custom_model(img)
  294. logger.info(f'-----page_id : {index}, page total time: {round(time.time() - page_start, 2)}-----')
  295. else:
  296. result = []
  297. page_info = {'page_no': index, 'width': page_width, 'height': page_height}
  298. page_dict = {'layout_dets': result, 'page_info': page_info}
  299. model_json.append(page_dict)
  300. """
  301. results = []
  302. for img_idx, img in enumerate(images):
  303. inference_start = time.time()
  304. result = custom_model(img)
  305. logger.info(f'-----image index : {img_idx}, image inference total time: {round(time.time() - inference_start, 2)}-----')
  306. results.append(result)
  307. gc_start = time.time()
  308. clean_memory(get_device())
  309. gc_time = round(time.time() - gc_start, 2)
  310. logger.info(f'gc time: {gc_time}')
  311. doc_analyze_time = round(time.time() - doc_analyze_start, 2)
  312. doc_analyze_speed = round(len(images) / doc_analyze_time, 2)
  313. logger.info(
  314. f'doc analyze time: {round(time.time() - doc_analyze_start, 2)},'
  315. f' speed: {doc_analyze_speed} pages/second'
  316. )
  317. return (idx, results)