# batch_analyze.py

import time

import cv2
import numpy as np
import torch
from loguru import logger
from PIL import Image

from magic_pdf.config.constants import MODEL_NAME
# from magic_pdf.config.exceptions import CUDA_NOT_AVAILABLE
# from magic_pdf.data.dataset import Dataset
# from magic_pdf.libs.clean_memory import clean_memory
# from magic_pdf.libs.config_reader import get_device
# from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
from magic_pdf.model.pdf_extract_kit import CustomPEKModel
from magic_pdf.model.sub_modules.model_utils import (
    clean_vram, crop_img, get_res_list_from_layout_res)
from magic_pdf.model.sub_modules.ocr.paddleocr.ocr_utils import (
    get_adjusted_mfdetrec_res, get_ocr_result_list)
# from magic_pdf.operators.models import InferenceResult

YOLO_LAYOUT_BASE_BATCH_SIZE = 4
MFD_BASE_BATCH_SIZE = 1
MFR_BASE_BATCH_SIZE = 16
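
# The effective batch size of each sub-model is its base value above multiplied
# by the `batch_ratio` passed to BatchAnalyze: e.g. with batch_ratio=2 the
# layout model sees batches of 8 images, MFD batches of 2 and MFR batches of 32.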


class BatchAnalyze:
    def __init__(self, model: CustomPEKModel, batch_ratio: int):
        self.model = model
        self.batch_ratio = batch_ratio

    def __call__(self, images: list) -> list:
        # `images` is a list of page images (numpy arrays); the return value has
        # one entry per page, each entry being that page's list of detection
        # dicts (layout regions, formulas, OCR lines, tables).
        images_layout_res = []

        layout_start_time = time.time()
        if self.model.layout_model_name == MODEL_NAME.LAYOUTLMv3:
            # layoutlmv3
            for image in images:
                layout_res = self.model.layout_model(image, ignore_catids=[])
                images_layout_res.append(layout_res)
        elif self.model.layout_model_name == MODEL_NAME.DocLayout_YOLO:
            # doclayout_yolo
            layout_images = []
            modified_images = []
            for image_index, image in enumerate(images):
                pil_img = Image.fromarray(image)
                # width, height = pil_img.size
                # if height > width:
                #     input_res = {'poly': [0, 0, width, 0, width, height, 0, height]}
                #     new_image, useful_list = crop_img(
                #         input_res, pil_img, crop_paste_x=width // 2, crop_paste_y=0
                #     )
                #     layout_images.append(new_image)
                #     modified_images.append([image_index, useful_list])
                # else:
                layout_images.append(pil_img)

            images_layout_res += self.model.layout_model.batch_predict(
                layout_images, self.batch_ratio * YOLO_LAYOUT_BASE_BATCH_SIZE
            )

            # Map detections made on cropped/padded images back to the original
            # page coordinates (only relevant when the commented-out portrait
            # cropping above is enabled and `modified_images` is non-empty).
            for image_index, useful_list in modified_images:
                for res in images_layout_res[image_index]:
                    for i in range(len(res['poly'])):
                        if i % 2 == 0:
                            res['poly'][i] = (
                                res['poly'][i] - useful_list[0] + useful_list[2]
                            )
                        else:
                            res['poly'][i] = (
                                res['poly'][i] - useful_list[1] + useful_list[3]
                            )
        logger.info(
            f'layout time: {round(time.time() - layout_start_time, 2)}, image num: {len(images)}'
        )

        if self.model.apply_formula:
            # formula detection (MFD)
            mfd_start_time = time.time()
            images_mfd_res = self.model.mfd_model.batch_predict(
                images, self.batch_ratio * MFD_BASE_BATCH_SIZE
            )
            logger.info(
                f'mfd time: {round(time.time() - mfd_start_time, 2)}, image num: {len(images)}'
            )

            # formula recognition (MFR)
            mfr_start_time = time.time()
            images_formula_list = self.model.mfr_model.batch_predict(
                images_mfd_res,
                images,
                batch_size=self.batch_ratio * MFR_BASE_BATCH_SIZE,
            )
            mfr_count = 0
            for image_index in range(len(images)):
                images_layout_res[image_index] += images_formula_list[image_index]
                mfr_count += len(images_formula_list[image_index])
            logger.info(
                f'mfr time: {round(time.time() - mfr_start_time, 2)}, image num: {mfr_count}'
            )

        # free GPU memory before the per-page OCR/table stage
        clean_vram(self.model.device, vram_threshold=8)

        ocr_time = 0
        ocr_count = 0
        table_time = 0
        table_count = 0
        # reference: magic_pdf/model/doc_analyze_by_custom_model.py:doc_analyze
        for index in range(len(images)):
            layout_res = images_layout_res[index]
            pil_img = Image.fromarray(images[index])

            ocr_res_list, table_res_list, single_page_mfdetrec_res = (
                get_res_list_from_layout_res(layout_res)
            )

            # OCR on text regions
            ocr_start = time.time()
            # Process each area that requires OCR processing
            for res in ocr_res_list:
                new_image, useful_list = crop_img(
                    res, pil_img, crop_paste_x=50, crop_paste_y=50
                )
                adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(
                    single_page_mfdetrec_res, useful_list
                )

                # OCR recognition (detection + recognition when apply_ocr is set,
                # detection only otherwise)
                new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
                if self.model.apply_ocr:
                    ocr_res = self.model.ocr_model.ocr(
                        new_image, mfd_res=adjusted_mfdetrec_res
                    )[0]
                else:
                    ocr_res = self.model.ocr_model.ocr(
                        new_image, mfd_res=adjusted_mfdetrec_res, rec=False
                    )[0]

                # Integrate OCR results back into the page's layout results
                if ocr_res:
                    ocr_result_list = get_ocr_result_list(ocr_res, useful_list)
                    layout_res.extend(ocr_result_list)

            ocr_time += time.time() - ocr_start
            ocr_count += len(ocr_res_list)

            # table recognition
            if self.model.apply_table:
                table_start = time.time()
                for res in table_res_list:
                    new_image, _ = crop_img(res, pil_img)
                    single_table_start_time = time.time()
                    html_code = None
                    if self.model.table_model_name == MODEL_NAME.STRUCT_EQTABLE:
                        with torch.no_grad():
                            table_result = self.model.table_model.predict(
                                new_image, 'html'
                            )
                            if len(table_result) > 0:
                                html_code = table_result[0]
                    elif self.model.table_model_name == MODEL_NAME.TABLE_MASTER:
                        html_code = self.model.table_model.img2html(new_image)
                    elif self.model.table_model_name == MODEL_NAME.RAPID_TABLE:
                        html_code, table_cell_bboxes, logic_points, elapse = (
                            self.model.table_model.predict(new_image)
                        )
                    run_time = time.time() - single_table_start_time
                    if run_time > self.model.table_max_time:
                        logger.warning(
                            f'table recognition processing exceeds max time {self.model.table_max_time}s'
                        )
                    # check whether the model returned usable HTML
                    if html_code:
                        expected_ending = html_code.strip().endswith(
                            '</html>'
                        ) or html_code.strip().endswith('</table>')
                        if expected_ending:
                            res['html'] = html_code
                        else:
                            logger.warning(
                                'table recognition processing fails, not found expected HTML table end'
                            )
                    else:
                        logger.warning(
                            'table recognition processing fails, not get html return'
                        )
                table_time += time.time() - table_start
                table_count += len(table_res_list)

        if self.model.apply_ocr:
            logger.info(f'ocr time: {round(ocr_time, 2)}, image num: {ocr_count}')
        else:
            logger.info(f'det time: {round(ocr_time, 2)}, image num: {ocr_count}')
        if self.model.apply_table:
            logger.info(f'table time: {round(table_time, 2)}, image num: {table_count}')

        return images_layout_res


# def doc_batch_analyze(
#     dataset: Dataset,
#     ocr: bool = False,
#     show_log: bool = False,
#     start_page_id=0,
#     end_page_id=None,
#     lang=None,
#     layout_model=None,
#     formula_enable=None,
#     table_enable=None,
#     batch_ratio: int | None = None,
# ) -> InferenceResult:
#     """Perform batch analysis on a document dataset.
#
#     Args:
#         dataset (Dataset): The dataset containing document pages to be analyzed.
#         ocr (bool, optional): Flag to enable OCR (Optical Character Recognition). Defaults to False.
#         show_log (bool, optional): Flag to enable logging. Defaults to False.
#         start_page_id (int, optional): The starting page ID for analysis. Defaults to 0.
#         end_page_id (int, optional): The ending page ID for analysis. Defaults to None, which means analyze till the last page.
#         lang (str, optional): Language for OCR. Defaults to None.
#         layout_model (optional): Layout model to be used for analysis. Defaults to None.
#         formula_enable (optional): Flag to enable formula detection. Defaults to None.
#         table_enable (optional): Flag to enable table detection. Defaults to None.
#         batch_ratio (int | None, optional): Ratio for batch processing. Defaults to None, which sets it to 1.
#
#     Raises:
#         CUDA_NOT_AVAILABLE: If CUDA is not available, raises an exception as batch analysis is not supported in CPU mode.
#
#     Returns:
#         InferenceResult: The result of the batch analysis containing the analyzed data and the dataset.
#     """
#
#     if not torch.cuda.is_available():
#         raise CUDA_NOT_AVAILABLE('batch analyze not support in CPU mode')
#
#     lang = None if lang == '' else lang
#     # TODO: auto detect batch size
#     batch_ratio = 1 if batch_ratio is None else batch_ratio
#     end_page_id = end_page_id if end_page_id else len(dataset)
#
#     model_manager = ModelSingleton()
#     custom_model: CustomPEKModel = model_manager.get_model(
#         ocr, show_log, lang, layout_model, formula_enable, table_enable
#     )
#     batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
#
#     model_json = []
#
#     # batch analyze
#     images = []
#     for index in range(len(dataset)):
#         if start_page_id <= index <= end_page_id:
#             page_data = dataset.get_page(index)
#             img_dict = page_data.get_image()
#             images.append(img_dict['img'])
#     analyze_result = batch_model(images)
#
#     for index in range(len(dataset)):
#         page_data = dataset.get_page(index)
#         img_dict = page_data.get_image()
#         page_width = img_dict['width']
#         page_height = img_dict['height']
#         if start_page_id <= index <= end_page_id:
#             result = analyze_result.pop(0)
#         else:
#             result = []
#
#         page_info = {'page_no': index, 'height': page_height, 'width': page_width}
#         page_dict = {'layout_dets': result, 'page_info': page_info}
#         model_json.append(page_dict)
#
#     # TODO: clean memory when gpu memory is not enough
#     clean_memory_start_time = time.time()
#     clean_memory(get_device())
#     logger.info(f'clean memory time: {round(time.time() - clean_memory_start_time, 2)}')
#
#     return InferenceResult(model_json, dataset)
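

# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal sketch of driving BatchAnalyze directly, assuming a CustomPEKModel
# has already been constructed elsewhere (for example via the ModelSingleton
# flow in the commented-out doc_batch_analyze above). `custom_model`,
# `page_paths` and `page_images` are hypothetical names used only for this
# example; model construction and page-image extraction are project-specific.
#
# page_images = [np.asarray(Image.open(p).convert('RGB')) for p in page_paths]
# batch_model = BatchAnalyze(model=custom_model, batch_ratio=2)
# per_page_detections = batch_model(page_images)
# for page_no, dets in enumerate(per_page_detections):
#     logger.info(f'page {page_no}: {len(dets)} detections')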