# doc_analyze_by_custom_model.py

import os
import time

# Disable Paddle's signal handling (the actual call follows the imports below)
import paddle
import torch
from loguru import logger

from magic_pdf.model.batch_analyze import BatchAnalyze
from magic_pdf.model.sub_modules.model_utils import get_vram

paddle.disable_signal_handler()

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # stop albumentations from checking for updates

try:
    import torchtext

    # Guard with hasattr instead of comparing version strings: a lexicographic
    # comparison like torchtext.__version__ >= '0.18.0' misorders versions
    # such as '0.9.0', while the attribute check matches the actual intent.
    if hasattr(torchtext, 'disable_torchtext_deprecation_warning'):
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL
from magic_pdf.operators.models import InferenceResult


def dict_compare(d1, d2):
    """Return True when two dicts hold exactly the same key/value pairs."""
    return d1.items() == d2.items()


def remove_duplicates_dicts(lst):
    """Drop duplicate dicts from a list while keeping first-seen order."""
    unique_dicts = []
    for dict_item in lst:
        if not any(
            dict_compare(dict_item, existing_dict) for existing_dict in unique_dicts
        ):
            unique_dicts.append(dict_item)
    return unique_dicts
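
# A minimal sketch of the dedup behavior above (illustrative values only):
#     >>> remove_duplicates_dicts([{'a': 1}, {'a': 1}, {'b': 2}])
#     [{'a': 1}, {'b': 2}]
# Later duplicates are dropped; the first occurrence wins.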


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        ocr: bool,
        show_log: bool,
        lang=None,
        layout_model=None,
        formula_enable=None,
        table_enable=None,
    ):
        # Cache one model per full option tuple so that each configuration
        # is initialized at most once per process.
        key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
        if key not in self._models:
            self._models[key] = custom_model_init(
                ocr=ocr,
                show_log=show_log,
                lang=lang,
                layout_model=layout_model,
                formula_enable=formula_enable,
                table_enable=table_enable,
            )
        return self._models[key]
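
# Usage sketch: repeated calls with identical arguments reuse the cached
# instance instead of re-loading the model weights.
#     manager = ModelSingleton()
#     m1 = manager.get_model(ocr=True, show_log=False)
#     m2 = manager.get_model(ocr=True, show_log=False)
#     assert m1 is m2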


def custom_model_init(
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    model = None

    if model_config.__model_mode__ == 'lite':
        logger.warning(
            'The Lite mode is provided for developers to conduct testing only, and the output quality is '
            'not guaranteed to be reliable.'
        )
        model = MODEL.Paddle
    elif model_config.__model_mode__ == 'full':
        model = MODEL.PEK

    if model_config.__use_inside_model__:
        model_init_start = time.time()
        if model == MODEL.Paddle:
            from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
        elif model == MODEL.PEK:
            from magic_pdf.model.pdf_extract_kit import CustomPEKModel
            # Read the models dir and device from the config file
            local_models_dir = get_local_models_dir()
            device = get_device()

            # Caller-supplied options override the values from the config file.
            layout_config = get_layout_config()
            if layout_model is not None:
                layout_config['model'] = layout_model

            formula_config = get_formula_config()
            if formula_enable is not None:
                formula_config['enable'] = formula_enable

            table_config = get_table_recog_config()
            if table_enable is not None:
                table_config['enable'] = table_enable

            model_input = {
                'ocr': ocr,
                'show_log': show_log,
                'models_dir': local_models_dir,
                'device': device,
                'table_config': table_config,
                'layout_config': layout_config,
                'formula_config': formula_config,
                'lang': lang,
            }

            custom_model = CustomPEKModel(**model_input)
        else:
            logger.error('Unsupported model name!')
            exit(1)
        model_init_cost = time.time() - model_init_start
        logger.info(f'model init cost: {model_init_cost}')
    else:
        logger.error('use_inside_model is False, so the built-in models cannot be used')
        exit(1)

    return custom_model
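
# Sketch of a direct 'full'-mode initialization. The models dir and device
# come from the magic-pdf config file, so this assumes that file is already
# in place:
#     model = custom_model_init(ocr=True, lang='en', table_enable=False)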


def doc_analyze(
    dataset: Dataset,
    ocr: bool = False,
    show_log: bool = False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
) -> InferenceResult:
    # Treat only None as "analyze to the last page"; a plain truthiness test
    # would also swallow a legitimate end_page_id of 0.
    end_page_id = end_page_id if end_page_id is not None else len(dataset) - 1

    model_manager = ModelSingleton()
    custom_model = model_manager.get_model(
        ocr, show_log, lang, layout_model, formula_enable, table_enable
    )

    batch_analyze = False
    device = get_device()
    npu_support = False
    if str(device).startswith('npu'):
        import torch_npu
        if torch_npu.npu.is_available():
            npu_support = True

    if (torch.cuda.is_available() and device != 'cpu') or npu_support:
        # Available VRAM in GB; can be overridden via the VIRTUAL_VRAM_SIZE
        # environment variable.
        gpu_memory = int(os.getenv('VIRTUAL_VRAM_SIZE', round(get_vram(device))))
        if gpu_memory is not None and gpu_memory >= 8:
            # Map VRAM to a batch ratio: 8-9 GB -> 2, 10-12 GB -> 4,
            # 13-16 GB -> 8, 17-32 GB -> 16, larger -> 32.
            if 8 <= gpu_memory < 10:
                batch_ratio = 2
            elif 10 <= gpu_memory <= 12:
                batch_ratio = 4
            elif 12 < gpu_memory <= 16:
                batch_ratio = 8
            elif 16 < gpu_memory <= 32:
                batch_ratio = 16
            else:
                batch_ratio = 32

            if batch_ratio >= 1:
                logger.info(f'gpu_memory: {gpu_memory} GB, batch_ratio: {batch_ratio}')
                batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
                batch_analyze = True

    model_json = []
    doc_analyze_start = time.time()

    if batch_analyze:
        # Batch analysis: collect the images for the selected page range
        # first, then run them through the model in one batched pass.
        images = []
        for index in range(len(dataset)):
            if start_page_id <= index <= end_page_id:
                page_data = dataset.get_page(index)
                img_dict = page_data.get_image()
                images.append(img_dict['img'])
        analyze_result = batch_model(images)

        # Pages outside the selected range get an empty detection list.
        for index in range(len(dataset)):
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            page_width = img_dict['width']
            page_height = img_dict['height']
            if start_page_id <= index <= end_page_id:
                result = analyze_result.pop(0)
            else:
                result = []

            page_info = {'page_no': index, 'height': page_height, 'width': page_width}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)
    else:
        # Single-page analysis: run the model page by page.
        for index in range(len(dataset)):
            page_data = dataset.get_page(index)
            img_dict = page_data.get_image()
            img = img_dict['img']
            page_width = img_dict['width']
            page_height = img_dict['height']
            if start_page_id <= index <= end_page_id:
                page_start = time.time()
                result = custom_model(img)
                logger.info(f'-----page_id : {index}, page total time: {round(time.time() - page_start, 2)}-----')
            else:
                result = []

            page_info = {'page_no': index, 'height': page_height, 'width': page_width}
            page_dict = {'layout_dets': result, 'page_info': page_info}
            model_json.append(page_dict)

    gc_start = time.time()
    clean_memory(get_device())
    gc_time = round(time.time() - gc_start, 2)
    logger.info(f'gc time: {gc_time}')

    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
    doc_analyze_speed = round((end_page_id + 1 - start_page_id) / doc_analyze_time, 2)
    logger.info(
        f'doc analyze time: {doc_analyze_time},'
        f' speed: {doc_analyze_speed} pages/second'
    )
    return InferenceResult(model_json, dataset)
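

# A minimal end-to-end sketch, kept under a __main__ guard so that importing
# this module stays side-effect free. 'demo.pdf' is a hypothetical input file,
# and the sketch assumes magic-pdf's config file and model weights are set up.
if __name__ == '__main__':
    from magic_pdf.data.dataset import PymuDocDataset

    # PymuDocDataset is the PDF-backed Dataset implementation shipped with
    # magic_pdf; it is constructed from the raw bytes of a PDF file.
    with open('demo.pdf', 'rb') as f:
        ds = PymuDocDataset(f.read())

    # Returns an InferenceResult wrapping the per-page model output together
    # with the dataset it was computed from.
    infer_result = doc_analyze(ds, ocr=True, lang='en')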