doc_analyze_by_custom_model.py

import os
import time

import torch

# Disable paddle's signal handlers so they do not interfere with the host process
import paddle
paddle.disable_signal_handler()

from loguru import logger

from magic_pdf.model.batch_analyze import BatchAnalyze
from magic_pdf.model.sub_modules.model_utils import get_vram

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # keep albumentations from checking for updates

try:
    import torchtext
    from packaging import version

    # Parse before comparing; a plain string comparison mis-orders versions
    # such as '0.9.0' vs '0.18.0'
    if version.parse(torchtext.__version__) >= version.parse('0.18.0'):
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL
from magic_pdf.operators.models import InferenceResult

class ModelSingleton:
    """Process-wide cache of initialized models, keyed by their init parameters."""

    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        ocr: bool,
        show_log: bool,
        lang=None,
        layout_model=None,
        formula_enable=None,
        table_enable=None,
    ):
        # Initialize a model only the first time this parameter combination is seen
        key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
        if key not in self._models:
            self._models[key] = custom_model_init(
                ocr=ocr,
                show_log=show_log,
                lang=lang,
                layout_model=layout_model,
                formula_enable=formula_enable,
                table_enable=table_enable,
            )
        return self._models[key]
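
# Usage sketch (names are illustrative): calls that share a parameter
# combination reuse one cached model, so weights load at most once per key.
#
#   manager = ModelSingleton()
#   m1 = manager.get_model(ocr=True, show_log=False)
#   m2 = manager.get_model(ocr=True, show_log=False)
#   assert m1 is m2  # same key -> same cached instance
#   m3 = manager.get_model(ocr=True, show_log=False, lang='en')  # new key -> new init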

def custom_model_init(
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    model = None
    if model_config.__model_mode__ == 'lite':
        logger.warning(
            'The Lite mode is provided for developers to conduct testing only, and the output quality is '
            'not guaranteed to be reliable.'
        )
        model = MODEL.Paddle
    elif model_config.__model_mode__ == 'full':
        model = MODEL.PEK

    if model_config.__use_inside_model__:
        model_init_start = time.time()
        if model == MODEL.Paddle:
            from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
        elif model == MODEL.PEK:
            from magic_pdf.model.pdf_extract_kit import CustomPEKModel
            # Read the model directory and device from the config file
            local_models_dir = get_local_models_dir()
            device = get_device()
            layout_config = get_layout_config()
            if layout_model is not None:
                layout_config['model'] = layout_model
            formula_config = get_formula_config()
            if formula_enable is not None:
                formula_config['enable'] = formula_enable
            table_config = get_table_recog_config()
            if table_enable is not None:
                table_config['enable'] = table_enable
            model_input = {
                'ocr': ocr,
                'show_log': show_log,
                'models_dir': local_models_dir,
                'device': device,
                'table_config': table_config,
                'layout_config': layout_config,
                'formula_config': formula_config,
                'lang': lang,
            }
            custom_model = CustomPEKModel(**model_input)
        else:
            logger.error('Model name not allowed!')
            exit(1)
        model_init_cost = time.time() - model_init_start
        logger.info(f'model init cost: {model_init_cost}')
    else:
        logger.error('use_inside_model is False, using the inside model is not allowed')
        exit(1)
    return custom_model
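
# The get_* helpers above read magic-pdf.json. An illustrative fragment follows;
# verify the exact key names against the config template shipped with your version:
#
#   {
#     "models-dir": "/path/to/models",
#     "device-mode": "cuda",
#     "layout-config": {"model": "doclayout_yolo"},
#     "formula-config": {"enable": true},
#     "table-config": {"enable": true}
#   }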

def doc_analyze(
    dataset: Dataset,
    ocr: bool = False,
    show_log: bool = False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
) -> InferenceResult:
    # Compare against None explicitly so a caller-supplied end_page_id=0 is honored
    end_page_id = end_page_id if end_page_id is not None else len(dataset) - 1

    model_manager = ModelSingleton()
    custom_model = model_manager.get_model(
        ocr, show_log, lang, layout_model, formula_enable, table_enable
    )

    batch_analyze = False
    device = get_device()

    npu_support = False
    if str(device).startswith('npu'):
        import torch_npu
        if torch_npu.npu.is_available():
            npu_support = True

    if (torch.cuda.is_available() and device != 'cpu') or npu_support:
        gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
        if gpu_memory >= 8:
            # Scale the batch size with the available VRAM
            if gpu_memory >= 40:
                batch_ratio = 32
            elif gpu_memory >= 20:
                batch_ratio = 16
            elif gpu_memory >= 16:
                batch_ratio = 8
            elif gpu_memory >= 10:
                batch_ratio = 4
            else:
                batch_ratio = 2
            logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
            batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
            batch_analyze = True
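
    # Note: VIRTUAL_VRAM_SIZE overrides the detected VRAM, e.g. to cap the batch
    # size on a shared GPU (the value is illustrative):
    #   VIRTUAL_VRAM_SIZE=10 python run.py   ->   batch_ratio = 4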

    model_json = []
    doc_analyze_start = time.time()

    if batch_analyze:
        # batch analyze: collect all in-range page images, then run them in one pass
        images = []
        for index in range(len(dataset)):
            if start_page_id <= index <= end_page_id:
                page_data = dataset.get_page(index)
                img_dict = page_data.get_image()
                images.append(img_dict['img'])
        analyze_result = batch_model(images)

        for index in range(len(dataset)):
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            page_width = img_dict['width']
            page_height = img_dict['height']
            if start_page_id <= index <= end_page_id:
                result = analyze_result.pop(0)
            else:
                result = []

            page_info = {'page_no': index, 'height': page_height, 'width': page_width}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
    else:
        # single analyze: run the model page by page
        for index in range(len(dataset)):
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            img = img_dict['img']
            page_width = img_dict['width']
            page_height = img_dict['height']
            if start_page_id <= index <= end_page_id:
                page_start = time.time()
                result = custom_model(img)
                logger.info(f'-----page_id : {index}, page total time: {round(time.time() - page_start, 2)}-----')
            else:
                result = []

            page_info = {'page_no': index, 'height': page_height, 'width': page_width}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
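
    # Each model_json entry has the shape (dimension values are illustrative):
    #   {'layout_dets': [...], 'page_info': {'page_no': 0, 'height': 2200, 'width': 1700}}
    # Pages outside [start_page_id, end_page_id] carry an empty layout_dets list.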

    gc_start = time.time()
    clean_memory(get_device())
    gc_time = round(time.time() - gc_start, 2)
    logger.info(f'gc time: {gc_time}')

    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
    # Guard against a zero elapsed time after rounding on very small page ranges
    doc_analyze_speed = round((end_page_id + 1 - start_page_id) / doc_analyze_time, 2) if doc_analyze_time > 0 else 0
    logger.info(
        f'doc analyze time: {doc_analyze_time},'
        f' speed: {doc_analyze_speed} pages/second'
    )
    return InferenceResult(model_json, dataset)
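
if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: magic-pdf.json is configured and the
    # model weights are downloaded; PymuDocDataset is the bundled Dataset
    # implementation; 'demo.pdf' is a hypothetical input path.
    from magic_pdf.data.dataset import PymuDocDataset

    with open('demo.pdf', 'rb') as f:
        ds = PymuDocDataset(f.read())

    inference_result = doc_analyze(ds, ocr=False, show_log=True)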