# pipeline.py
  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import os, sys
  15. from typing import Any, Dict, Optional
  16. import numpy as np
  17. import cv2
  18. from ..base import BasePipeline
  19. from ..components import CropByBoxes
  20. from ..layout_parsing.utils import convert_points_to_boxes
  21. from .utils import get_neighbor_boxes_idx
  22. from .table_recognition_post_processing import get_table_recognition_res
  23. from .result import SingleTableRecognitionResult, TableRecognitionResult
  24. from ....utils import logging
  25. from ...utils.pp_option import PaddlePredictorOption
  26. from ...common.reader import ReadImage
  27. from ...common.batch_sampler import ImageBatchSampler
  28. from ..ocr.result import OCRResult
  29. from ..doc_preprocessor.result import DocPreprocessorResult
  30. # TODO: migrate models_new to models once the rename lands
  31. from ...models_new.object_detection.result import DetResult
  32. class TableRecognitionPipeline(BasePipeline):
  33. """Table Recognition Pipeline"""
  34. entities = ["table_recognition"]
  35. def __init__(
  36. self,
  37. config: Dict,
  38. device: str = None,
  39. pp_option: PaddlePredictorOption = None,
  40. use_hpip: bool = False,
  41. hpi_params: Optional[Dict[str, Any]] = None,
  42. ) -> None:
  43. """Initializes the layout parsing pipeline.
  44. Args:
  45. config (Dict): Configuration dictionary containing various settings.
  46. device (str, optional): Device to run the predictions on. Defaults to None.
  47. pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
  48. use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
  49. hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
  50. """
  51. super().__init__(
  52. device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
  53. )
  54. self.use_doc_preprocessor = False
  55. if "use_doc_preprocessor" in config:
  56. self.use_doc_preprocessor = config["use_doc_preprocessor"]
  57. if self.use_doc_preprocessor:
  58. doc_preprocessor_config = config["SubPipelines"]["DocPreprocessor"]
  59. self.doc_preprocessor_pipeline = self.create_pipeline(
  60. doc_preprocessor_config
  61. )
  62. self.use_layout_detection = True
  63. if "use_layout_detection" in config:
  64. self.use_layout_detection = config["use_layout_detection"]
  65. if self.use_layout_detection:
  66. layout_det_config = config["SubModules"]["LayoutDetection"]
  67. self.layout_det_model = self.create_model(layout_det_config)
  68. table_structure_config = config["SubModules"]["TableStructureRecognition"]
  69. self.table_structure_model = self.create_model(table_structure_config)
  70. self.use_ocr_model = True
  71. if "use_ocr_model" in config:
  72. self.use_ocr_model = config["use_ocr_model"]
  73. if self.use_ocr_model:
  74. general_ocr_config = config["SubPipelines"]["GeneralOCR"]
  75. self.general_ocr_pipeline = self.create_pipeline(general_ocr_config)
  76. self._crop_by_boxes = CropByBoxes()
  77. self.batch_sampler = ImageBatchSampler(batch_size=1)
  78. self.img_reader = ReadImage(format="BGR")
  79. def check_input_params_valid(
  80. self, input_params: Dict, overall_ocr_res: OCRResult, layout_det_res: DetResult
  81. ) -> bool:
  82. """
  83. Check if the input parameters are valid based on the initialized models.
  84. Args:
  85. input_params (Dict): A dictionary containing input parameters.
  86. overall_ocr_res (OCRResult): Overall OCR result obtained after running the OCR pipeline.
  87. The overall OCR result with convert_points_to_boxes information.
  88. layout_det_res (DetResult): The layout detection result.
  89. Returns:
  90. bool: True if all required models are initialized according to input parameters, False otherwise.
  91. """
  92. if input_params["use_doc_preprocessor"] and not self.use_doc_preprocessor:
  93. logging.error(
  94. "Set use_doc_preprocessor, but the models for doc preprocessor are not initialized."
  95. )
  96. return False
  97. if input_params["use_layout_detection"]:
  98. if layout_det_res is not None:
  99. logging.error(
  100. "The layout detection model has already been initialized, please set use_layout_detection=False"
  101. )
  102. return False
  103. if not self.use_layout_detection:
  104. logging.error(
  105. "Set use_layout_detection, but the models for layout detection are not initialized."
  106. )
  107. return False
  108. if input_params["use_ocr_model"]:
  109. if overall_ocr_res is not None:
  110. logging.error(
  111. "The OCR models have already been initialized, please set use_ocr_model=False"
  112. )
  113. return False
  114. if not self.use_ocr_model:
  115. logging.error(
  116. "Set use_ocr_model, but the models for OCR are not initialized."
  117. )
  118. return False
  119. return True
  120. def predict_doc_preprocessor_res(
  121. self, image_array: np.ndarray, input_params: dict
  122. ) -> tuple[DocPreprocessorResult, np.ndarray]:
  123. """
  124. Preprocess the document image based on input parameters.
  125. Args:
  126. image_array (np.ndarray): The input image array.
  127. input_params (dict): Dictionary containing preprocessing parameters.
  128. Returns:
  129. tuple[DocPreprocessorResult, np.ndarray]: A tuple containing the preprocessing
  130. result dictionary and the processed image array.
  131. """
  132. if input_params["use_doc_preprocessor"]:
  133. use_doc_orientation_classify = input_params["use_doc_orientation_classify"]
  134. use_doc_unwarping = input_params["use_doc_unwarping"]
  135. doc_preprocessor_res = next(
  136. self.doc_preprocessor_pipeline(
  137. image_array,
  138. use_doc_orientation_classify=use_doc_orientation_classify,
  139. use_doc_unwarping=use_doc_unwarping,
  140. )
  141. )
  142. doc_preprocessor_image = doc_preprocessor_res["output_img"]
  143. else:
  144. doc_preprocessor_res = {}
  145. doc_preprocessor_image = image_array
  146. return doc_preprocessor_res, doc_preprocessor_image
  147. def predict_single_table_recognition_res(
  148. self,
  149. image_array: np.ndarray,
  150. overall_ocr_res: OCRResult,
  151. table_box: list,
  152. flag_find_nei_text: bool = True,
  153. ) -> SingleTableRecognitionResult:
  154. """
  155. Predict table recognition results from an image array, layout detection results, and OCR results.
  156. Args:
  157. image_array (np.ndarray): The input image represented as a numpy array.
  158. overall_ocr_res (OCRResult): Overall OCR result obtained after running the OCR pipeline.
  159. The overall OCR results containing text recognition information.
  160. table_box (list): The table box coordinates.
  161. flag_find_nei_text (bool): Whether to find neighboring text.
  162. Returns:
  163. SingleTableRecognitionResult: single table recognition result.
  164. """
  165. table_structure_pred = next(self.table_structure_model(image_array))
  166. single_table_recognition_res = get_table_recognition_res(
  167. table_box, table_structure_pred, overall_ocr_res
  168. )
  169. neighbor_text = ""
  170. if flag_find_nei_text:
  171. match_idx_list = get_neighbor_boxes_idx(
  172. overall_ocr_res["dt_boxes"], table_box
  173. )
  174. if len(match_idx_list) > 0:
  175. for idx in match_idx_list:
  176. neighbor_text += overall_ocr_res["rec_text"][idx] + "; "
  177. single_table_recognition_res["neighbor_text"] = neighbor_text
  178. return single_table_recognition_res
  179. def predict(
  180. self,
  181. input: str | list[str] | np.ndarray | list[np.ndarray],
  182. use_layout_detection: bool = True,
  183. use_doc_orientation_classify: bool = False,
  184. use_doc_unwarping: bool = False,
  185. overall_ocr_res: OCRResult = None,
  186. layout_det_res: DetResult = None,
  187. **kwargs
  188. ) -> TableRecognitionResult:
  189. """
  190. This function predicts the layout parsing result for the given input.
  191. Args:
  192. input (str | list[str] | np.ndarray | list[np.ndarray]): The input image(s) of pdf(s) to be processed.
  193. use_layout_detection (bool): Whether to use layout detection.
  194. use_doc_orientation_classify (bool): Whether to use document orientation classification.
  195. use_doc_unwarping (bool): Whether to use document unwarping.
  196. overall_ocr_res (OCRResult): The overall OCR result with convert_points_to_boxes information.
  197. It will be used if it is not None and use_ocr_model is False.
  198. layout_det_res (DetResult): The layout detection result.
  199. It will be used if it is not None and use_layout_detection is False.
  200. **kwargs: Additional keyword arguments.
  201. Returns:
  202. TableRecognitionResult: The predicted table recognition result.
  203. """
  204. input_params = {
  205. "use_layout_detection": use_layout_detection,
  206. "use_doc_preprocessor": self.use_doc_preprocessor,
  207. "use_doc_orientation_classify": use_doc_orientation_classify,
  208. "use_doc_unwarping": use_doc_unwarping,
  209. "use_ocr_model": self.use_ocr_model,
  210. }
  211. if use_doc_orientation_classify or use_doc_unwarping:
  212. input_params["use_doc_preprocessor"] = True
  213. else:
  214. input_params["use_doc_preprocessor"] = False
  215. if not self.check_input_params_valid(
  216. input_params, overall_ocr_res, layout_det_res
  217. ):
  218. yield None
  219. for img_id, batch_data in enumerate(self.batch_sampler(input)):
  220. image_array = self.img_reader(batch_data)[0]
  221. img_id += 1
  222. doc_preprocessor_res, doc_preprocessor_image = (
  223. self.predict_doc_preprocessor_res(image_array, input_params)
  224. )
  225. if self.use_ocr_model:
  226. overall_ocr_res = next(
  227. self.general_ocr_pipeline(doc_preprocessor_image)
  228. )
  229. dt_boxes = convert_points_to_boxes(overall_ocr_res["dt_polys"])
  230. overall_ocr_res["dt_boxes"] = dt_boxes
  231. table_res_list = []
  232. table_region_id = 1
  233. if not input_params["use_layout_detection"] and layout_det_res is None:
  234. layout_det_res = {}
  235. img_height, img_width = doc_preprocessor_image.shape[:2]
  236. table_box = [0, 0, img_width - 1, img_height - 1]
  237. single_table_rec_res = self.predict_single_table_recognition_res(
  238. doc_preprocessor_image,
  239. overall_ocr_res,
  240. table_box,
  241. flag_find_nei_text=False,
  242. )
  243. single_table_rec_res["table_region_id"] = table_region_id
  244. table_res_list.append(single_table_rec_res)
  245. table_region_id += 1
  246. else:
  247. if input_params["use_layout_detection"]:
  248. layout_det_res = next(self.layout_det_model(doc_preprocessor_image))
  249. for box_info in layout_det_res["boxes"]:
  250. if box_info["label"].lower() in ["table"]:
  251. crop_img_info = self._crop_by_boxes(image_array, [box_info])
  252. crop_img_info = crop_img_info[0]
  253. table_box = crop_img_info["box"]
  254. single_table_rec_res = (
  255. self.predict_single_table_recognition_res(
  256. crop_img_info["img"], overall_ocr_res, table_box
  257. )
  258. )
  259. single_table_rec_res["table_region_id"] = table_region_id
  260. table_res_list.append(single_table_rec_res)
  261. table_region_id += 1
  262. single_img_res = {
  263. "layout_det_res": layout_det_res,
  264. "doc_preprocessor_res": doc_preprocessor_res,
  265. "overall_ocr_res": overall_ocr_res,
  266. "table_res_list": table_res_list,
  267. "input_params": input_params,
  268. "img_id": img_id,
  269. }
  270. yield TableRecognitionResult(single_img_res)