doc_analyze_by_custom_model.py

import os
import time

# Disable paddle's signal handler
import paddle
from loguru import logger

paddle.disable_signal_handler()

os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1'  # keep albumentations from checking for updates

try:
    import torchtext

    if torchtext.__version__ >= '0.18.0':
        torchtext.disable_torchtext_deprecation_warning()
except ImportError:
    pass

import magic_pdf.model as model_config
from magic_pdf.data.dataset import Dataset
from magic_pdf.libs.clean_memory import clean_memory
from magic_pdf.libs.config_reader import (get_device, get_formula_config,
                                          get_layout_config,
                                          get_local_models_dir,
                                          get_table_recog_config)
from magic_pdf.model.model_list import MODEL
from magic_pdf.operators.models import InferenceResult


def dict_compare(d1, d2):
    return d1.items() == d2.items()


def remove_duplicates_dicts(lst):
    unique_dicts = []
    for dict_item in lst:
        if not any(
            dict_compare(dict_item, existing_dict) for existing_dict in unique_dicts
        ):
            unique_dicts.append(dict_item)
    return unique_dicts
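
# For illustration, remove_duplicates_dicts keeps the first occurrence of each dict:
#   remove_duplicates_dicts([{'a': 1}, {'a': 1}, {'b': 2}])  ->  [{'a': 1}, {'b': 2}]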


class ModelSingleton:
    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        ocr: bool,
        show_log: bool,
        lang=None,
        layout_model=None,
        formula_enable=None,
        table_enable=None,
    ):
        key = (ocr, show_log, lang, layout_model, formula_enable, table_enable)
        if key not in self._models:
            self._models[key] = custom_model_init(
                ocr=ocr,
                show_log=show_log,
                lang=lang,
                layout_model=layout_model,
                formula_enable=formula_enable,
                table_enable=table_enable,
            )
        return self._models[key]
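
# Note: ModelSingleton caches one model per (ocr, show_log, lang, layout_model,
# formula_enable, table_enable) key, so repeated get_model calls with the same
# arguments reuse the already-initialized model instead of reloading it.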


def custom_model_init(
    ocr: bool = False,
    show_log: bool = False,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
):
    model = None

    if model_config.__model_mode__ == 'lite':
        logger.warning(
            'The Lite mode is provided for developers to conduct testing only, and the output quality is '
            'not guaranteed to be reliable.'
        )
        model = MODEL.Paddle
    elif model_config.__model_mode__ == 'full':
        model = MODEL.PEK

    if model_config.__use_inside_model__:
        model_init_start = time.time()
        if model == MODEL.Paddle:
            from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
            custom_model = CustomPaddleModel(ocr=ocr, show_log=show_log, lang=lang)
        elif model == MODEL.PEK:
            from magic_pdf.model.pdf_extract_kit import CustomPEKModel
            # Read the model directory and device from the config file
            local_models_dir = get_local_models_dir()
            device = get_device()

            layout_config = get_layout_config()
            if layout_model is not None:
                layout_config['model'] = layout_model

            formula_config = get_formula_config()
            if formula_enable is not None:
                formula_config['enable'] = formula_enable

            table_config = get_table_recog_config()
            if table_enable is not None:
                table_config['enable'] = table_enable

            model_input = {
                'ocr': ocr,
                'show_log': show_log,
                'models_dir': local_models_dir,
                'device': device,
                'table_config': table_config,
                'layout_config': layout_config,
                'formula_config': formula_config,
                'lang': lang,
            }

            custom_model = CustomPEKModel(**model_input)
        else:
            logger.error('Unsupported model name!')
            exit(1)
        model_init_cost = time.time() - model_init_start
        logger.info(f'model init cost: {model_init_cost}')
    else:
        logger.error('use_inside_model is False, so the built-in models cannot be used')
        exit(1)

    return custom_model
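
# Hypothetical example of the per-call overrides: custom_model_init(ocr=True,
# table_enable=False) keeps the layout model named in the config file but forces
# table recognition off for that model instance.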


def doc_analyze(
    dataset: Dataset,
    ocr: bool = False,
    show_log: bool = False,
    start_page_id=0,
    end_page_id=None,
    lang=None,
    layout_model=None,
    formula_enable=None,
    table_enable=None,
) -> InferenceResult:
    model_manager = ModelSingleton()
    custom_model = model_manager.get_model(
        ocr, show_log, lang, layout_model, formula_enable, table_enable
    )

    model_json = []
    doc_analyze_start = time.time()

    if end_page_id is None:
        end_page_id = len(dataset) - 1  # default to the last page index

    for index in range(len(dataset)):
        page_data = dataset.get_page(index)
        img_dict = page_data.get_image()
        img = img_dict['img']
        page_width = img_dict['width']
        page_height = img_dict['height']
        if start_page_id <= index <= end_page_id:
            page_start = time.time()
            result = custom_model(img)
            logger.info(f'-----page_id : {index}, page total time: {round(time.time() - page_start, 2)}-----')
        else:
            result = []

        page_info = {'page_no': index, 'height': page_height, 'width': page_width}
        page_dict = {'layout_dets': result, 'page_info': page_info}
        model_json.append(page_dict)

    gc_start = time.time()
    clean_memory()
    gc_time = round(time.time() - gc_start, 2)
    logger.info(f'gc time: {gc_time}')

    doc_analyze_time = round(time.time() - doc_analyze_start, 2)
    doc_analyze_speed = round((end_page_id + 1 - start_page_id) / doc_analyze_time, 2)
    logger.info(
        f'doc analyze time: {doc_analyze_time},'
        f' speed: {doc_analyze_speed} pages/second'
    )
    return InferenceResult(model_json, dataset)
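

# Minimal usage sketch (illustrative only; assumes PymuDocDataset from
# magic_pdf.data.dataset and a local 'demo.pdf'; adjust to your own setup):
#
#   from magic_pdf.data.dataset import PymuDocDataset
#
#   with open('demo.pdf', 'rb') as f:
#       ds = PymuDocDataset(f.read())          # wrap the PDF bytes in a Dataset
#   infer_result = doc_analyze(ds, ocr=True)   # run layout/formula/table/OCR inference
#   # infer_result wraps one {'layout_dets': ..., 'page_info': ...} dict per page.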