batch_analyze.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280
  1. import time
  2. import cv2
  3. import numpy as np
  4. import torch
  5. from loguru import logger
  6. from PIL import Image
  7. from magic_pdf.config.constants import MODEL_NAME
  8. from magic_pdf.config.exceptions import CUDA_NOT_AVAILABLE
  9. from magic_pdf.data.dataset import Dataset
  10. from magic_pdf.libs.clean_memory import clean_memory
  11. from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
  12. from magic_pdf.model.operators import InferenceResult
  13. from magic_pdf.model.pdf_extract_kit import CustomPEKModel
  14. from magic_pdf.model.sub_modules.model_utils import (
  15. clean_vram,
  16. crop_img,
  17. get_res_list_from_layout_res,
  18. )
  19. from magic_pdf.model.sub_modules.ocr.paddleocr.ocr_utils import (
  20. get_adjusted_mfdetrec_res,
  21. get_ocr_result_list,
  22. )
# Base batch sizes for each sub-model. The effective batch size at inference
# time is `base * batch_ratio` (see BatchAnalyze.__call__).
YOLO_LAYOUT_BASE_BATCH_SIZE = 4  # DocLayout-YOLO layout detection
MFD_BASE_BATCH_SIZE = 1  # math formula detection
MFR_BASE_BATCH_SIZE = 16  # math formula recognition
class BatchAnalyze:
    """Run the full per-page analysis pipeline over a list of page images.

    Stages, in order: layout detection -> (optional) formula detection and
    recognition -> OCR (detection, or detection+recognition) -> (optional)
    table recognition. Results for each page are accumulated into one list
    of detection dicts per page.
    """

    def __init__(self, model: CustomPEKModel, batch_ratio: int):
        # model bundles the layout / MFD / MFR / OCR / table sub-models and
        # the apply_* flags that gate each pipeline stage
        self.model = model
        # multiplier applied to each sub-model's base batch size
        self.batch_ratio = batch_ratio

    def __call__(self, images: list) -> list:
        """Analyze `images` (one array per page, convertible via
        Image.fromarray) and return one result list per page."""
        images_layout_res = []

        # ---- stage 1: layout detection ----
        layout_start_time = time.time()
        if self.model.layout_model_name == MODEL_NAME.LAYOUTLMv3:
            # layoutlmv3 has no batch API: predict page by page
            for image in images:
                layout_res = self.model.layout_model(image, ignore_catids=[])
                images_layout_res.append(layout_res)
        elif self.model.layout_model_name == MODEL_NAME.DocLayout_YOLO:
            # doclayout_yolo supports batching; portrait pages are first
            # pasted onto a wider canvas via crop_img so batched inputs are
            # shaped alike
            layout_images = []
            modified_images = []
            for image_index, image in enumerate(images):
                pil_img = Image.fromarray(image)
                width, height = pil_img.size
                if height > width:
                    input_res = {"poly": [0, 0, width, 0, width, height, 0, height]}
                    new_image, useful_list = crop_img(
                        input_res, pil_img, crop_paste_x=width // 2, crop_paste_y=0
                    )
                    layout_images.append(new_image)
                    # remember padded pages so boxes can be mapped back below
                    modified_images.append([image_index, useful_list])
                else:
                    layout_images.append(pil_img)
            images_layout_res += self.model.layout_model.batch_predict(
                layout_images, self.batch_ratio * YOLO_LAYOUT_BASE_BATCH_SIZE
            )
            # map polygons detected on padded canvases back to the original
            # page coordinates; even poly indices are x values, odd are y
            # (offsets taken from crop_img's useful_list — TODO confirm order)
            for image_index, useful_list in modified_images:
                for res in images_layout_res[image_index]:
                    for i in range(len(res["poly"])):
                        if i % 2 == 0:
                            res["poly"][i] = (
                                res["poly"][i] - useful_list[0] + useful_list[2]
                            )
                        else:
                            res["poly"][i] = (
                                res["poly"][i] - useful_list[1] + useful_list[3]
                            )
        logger.info(
            f"layout time: {round(time.time() - layout_start_time, 2)}, image num: {len(images)}"
        )

        if self.model.apply_formula:
            # ---- stage 2a: formula detection (MFD) ----
            mfd_start_time = time.time()
            images_mfd_res = self.model.mfd_model.batch_predict(
                images, self.batch_ratio * MFD_BASE_BATCH_SIZE
            )
            logger.info(
                f"mfd time: {round(time.time() - mfd_start_time, 2)}, image num: {len(images)}"
            )

            # ---- stage 2b: formula recognition (MFR) ----
            mfr_start_time = time.time()
            images_formula_list = self.model.mfr_model.batch_predict(
                images_mfd_res,
                images,
                batch_size=self.batch_ratio * MFR_BASE_BATCH_SIZE,
            )
            # merge recognized formulas into each page's layout results
            for image_index in range(len(images)):
                images_layout_res[image_index] += images_formula_list[image_index]
            logger.info(
                f"mfr time: {round(time.time() - mfr_start_time, 2)}, image num: {len(images)}"
            )

        # free GPU memory before the OCR/table stage
        clean_vram(self.model.device, vram_threshold=8)

        ocr_time = 0
        ocr_count = 0
        table_time = 0
        table_count = 0
        # reference: magic_pdf/model/doc_analyze_by_custom_model.py:doc_analyze
        for index in range(len(images)):
            layout_res = images_layout_res[index]
            pil_img = Image.fromarray(images[index])

            # split this page's layout results into OCR areas, table areas,
            # and formula-detection boxes
            ocr_res_list, table_res_list, single_page_mfdetrec_res = (
                get_res_list_from_layout_res(layout_res)
            )

            # ---- stage 3: OCR ----
            ocr_start = time.time()
            # Process each area that requires OCR processing
            for res in ocr_res_list:
                new_image, useful_list = crop_img(
                    res, pil_img, crop_paste_x=50, crop_paste_y=50
                )
                # shift formula boxes into the crop's coordinate system so
                # the OCR model can exclude them
                adjusted_mfdetrec_res = get_adjusted_mfdetrec_res(
                    single_page_mfdetrec_res, useful_list
                )

                # OCR recognition (PIL RGB -> OpenCV BGR)
                new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
                if self.model.apply_ocr:
                    # full OCR: detection + recognition
                    ocr_res = self.model.ocr_model.ocr(
                        new_image, mfd_res=adjusted_mfdetrec_res
                    )[0]
                else:
                    # detection only when recognized text is not requested
                    ocr_res = self.model.ocr_model.ocr(
                        new_image, mfd_res=adjusted_mfdetrec_res, rec=False
                    )[0]

                # Integration results: map OCR boxes back to page coordinates
                if ocr_res:
                    ocr_result_list = get_ocr_result_list(ocr_res, useful_list)
                    layout_res.extend(ocr_result_list)
            ocr_time += time.time() - ocr_start
            ocr_count += len(ocr_res_list)

            # ---- stage 4: table recognition ----
            if self.model.apply_table:
                table_start = time.time()
                for res in table_res_list:
                    new_image, _ = crop_img(res, pil_img)
                    single_table_start_time = time.time()
                    html_code = None
                    if self.model.table_model_name == MODEL_NAME.STRUCT_EQTABLE:
                        with torch.no_grad():
                            table_result = self.model.table_model.predict(
                                new_image, "html"
                            )
                            if len(table_result) > 0:
                                html_code = table_result[0]
                    elif self.model.table_model_name == MODEL_NAME.TABLE_MASTER:
                        html_code = self.model.table_model.img2html(new_image)
                    elif self.model.table_model_name == MODEL_NAME.RAPID_TABLE:
                        html_code, table_cell_bboxes, elapse = (
                            self.model.table_model.predict(new_image)
                        )
                    run_time = time.time() - single_table_start_time
                    if run_time > self.model.table_max_time:
                        # over-budget tables are logged but their result is
                        # still kept
                        logger.warning(
                            f"table recognition processing exceeds max time {self.model.table_max_time}s"
                        )
                    # accept the HTML only if it looks complete (proper
                    # closing tag), otherwise log and drop it
                    if html_code:
                        expected_ending = html_code.strip().endswith(
                            "</html>"
                        ) or html_code.strip().endswith("</table>")
                        if expected_ending:
                            res["html"] = html_code
                        else:
                            logger.warning(
                                "table recognition processing fails, not found expected HTML table end"
                            )
                    else:
                        logger.warning(
                            "table recognition processing fails, not get html return"
                        )
                table_time += time.time() - table_start
                table_count += len(table_res_list)

        if self.model.apply_ocr:
            logger.info(f"ocr time: {round(ocr_time, 2)}, image num: {ocr_count}")
        else:
            logger.info(f"det time: {round(ocr_time, 2)}, image num: {ocr_count}")
        if self.model.apply_table:
            logger.info(f"table time: {round(table_time, 2)}, image num: {table_count}")
        return images_layout_res
  180. def doc_batch_analyze(
  181. dataset: Dataset,
  182. ocr: bool = False,
  183. show_log: bool = False,
  184. start_page_id=0,
  185. end_page_id=None,
  186. lang=None,
  187. layout_model=None,
  188. formula_enable=None,
  189. table_enable=None,
  190. batch_ratio: int | None = None,
  191. ) -> InferenceResult:
  192. """
  193. Perform batch analysis on a document dataset.
  194. Args:
  195. dataset (Dataset): The dataset containing document pages to be analyzed.
  196. ocr (bool, optional): Flag to enable OCR (Optical Character Recognition). Defaults to False.
  197. show_log (bool, optional): Flag to enable logging. Defaults to False.
  198. start_page_id (int, optional): The starting page ID for analysis. Defaults to 0.
  199. end_page_id (int, optional): The ending page ID for analysis. Defaults to None, which means analyze till the last page.
  200. lang (str, optional): Language for OCR. Defaults to None.
  201. layout_model (optional): Layout model to be used for analysis. Defaults to None.
  202. formula_enable (optional): Flag to enable formula detection. Defaults to None.
  203. table_enable (optional): Flag to enable table detection. Defaults to None.
  204. batch_ratio (int | None, optional): Ratio for batch processing. Defaults to None, which sets it to 1.
  205. Raises:
  206. CUDA_NOT_AVAILABLE: If CUDA is not available, raises an exception as batch analysis is not supported in CPU mode.
  207. Returns:
  208. InferenceResult: The result of the batch analysis containing the analyzed data and the dataset.
  209. """
  210. if not torch.cuda.is_available():
  211. raise CUDA_NOT_AVAILABLE("batch analyze not support in CPU mode")
  212. lang = None if lang == "" else lang
  213. # TODO: auto detect batch size
  214. batch_ratio = 1 if batch_ratio is None else batch_ratio
  215. end_page_id = end_page_id if end_page_id else len(dataset)
  216. model_manager = ModelSingleton()
  217. custom_model: CustomPEKModel = model_manager.get_model(
  218. ocr, show_log, lang, layout_model, formula_enable, table_enable
  219. )
  220. batch_model = BatchAnalyze(model=custom_model, batch_ratio=batch_ratio)
  221. model_json = []
  222. # batch analyze
  223. images = []
  224. for index in range(len(dataset)):
  225. if start_page_id <= index <= end_page_id:
  226. page_data = dataset.get_page(index)
  227. img_dict = page_data.get_image()
  228. images.append(img_dict["img"])
  229. analyze_result = batch_model(images)
  230. for index in range(len(dataset)):
  231. page_data = dataset.get_page(index)
  232. img_dict = page_data.get_image()
  233. page_width = img_dict["width"]
  234. page_height = img_dict["height"]
  235. if start_page_id <= index <= end_page_id:
  236. result = analyze_result.pop(0)
  237. else:
  238. result = []
  239. page_info = {"page_no": index, "height": page_height, "width": page_width}
  240. page_dict = {"layout_dets": result, "page_info": page_info}
  241. model_json.append(page_dict)
  242. # TODO: clean memory when gpu memory is not enough
  243. clean_memory_start_time = time.time()
  244. clean_memory()
  245. logger.info(f"clean memory time: {round(time.time() - clean_memory_start_time, 2)}")
  246. return InferenceResult(model_json, dataset)