pipeline.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from ..base import BasePipeline
  15. from typing import Any, Dict, Optional
  16. import numpy as np
  17. import cv2
  18. from ..components import CropByBoxes
  19. from .utils import convert_points_to_boxes, get_sub_regions_ocr_res
  20. from .table_recognition_post_processing import get_table_recognition_res
  21. from .result import LayoutParsingResult
  22. from ....utils import logging
  23. from ...utils.pp_option import PaddlePredictorOption
# TODO: these import paths need to be updated later.
  25. from ...components.transforms import ReadImage
  26. from ..ocr.result import OCRResult
  27. from ...results import DetResult
class LayoutParsingPipeline(BasePipeline):
    """Layout Parsing Pipeline.

    Combines a layout-detection model with optional sub-pipelines for
    document preprocessing, common OCR, seal recognition and table
    recognition, as configured at construction time.
    """

    # Registry key under which this pipeline is looked up.
    entities = "layout_parsing"
  31. def __init__(
  32. self,
  33. config: Dict,
  34. device: str = None,
  35. pp_option: PaddlePredictorOption = None,
  36. use_hpip: bool = False,
  37. hpi_params: Optional[Dict[str, Any]] = None,
  38. ) -> None:
  39. """Initializes the layout parsing pipeline.
  40. Args:
  41. config (Dict): Configuration dictionary containing various settings.
  42. device (str, optional): Device to run the predictions on. Defaults to None.
  43. pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
  44. use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
  45. hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
  46. """
  47. super().__init__(
  48. device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
  49. )
  50. self.inintial_predictor(config)
  51. self.img_reader = ReadImage(format="BGR")
  52. self._crop_by_boxes = CropByBoxes()
  53. def inintial_predictor(self, config: Dict) -> None:
  54. """Initializes the predictor based on the provided configuration.
  55. Args:
  56. config (Dict): A dictionary containing the configuration for the predictor.
  57. Returns:
  58. None
  59. """
  60. layout_det_config = config["SubModules"]["LayoutDetection"]
  61. self.layout_det_model = self.create_model(layout_det_config)
  62. self.use_doc_preprocessor = False
  63. if "use_doc_preprocessor" in config:
  64. self.use_doc_preprocessor = config["use_doc_preprocessor"]
  65. if self.use_doc_preprocessor:
  66. doc_preprocessor_config = config["SubPipelines"]["DocPreprocessor"]
  67. self.doc_preprocessor_pipeline = self.create_pipeline(
  68. doc_preprocessor_config
  69. )
  70. self.use_common_ocr = False
  71. if "use_common_ocr" in config:
  72. self.use_common_ocr = config["use_common_ocr"]
  73. if self.use_common_ocr:
  74. common_ocr_config = config["SubPipelines"]["CommonOCR"]
  75. self.common_ocr_pipeline = self.create_pipeline(common_ocr_config)
  76. self.use_seal_recognition = False
  77. if "use_seal_recognition" in config:
  78. self.use_seal_recognition = config["use_seal_recognition"]
  79. if self.use_seal_recognition:
  80. seal_ocr_config = config["SubPipelines"]["SealOCR"]
  81. self.seal_ocr_pipeline = self.create_pipeline(seal_ocr_config)
  82. self.use_table_recognition = False
  83. if "use_table_recognition" in config:
  84. self.use_table_recognition = config["use_table_recognition"]
  85. if self.use_table_recognition:
  86. table_structure_config = config["SubModules"]["TableStructurePredictor"]
  87. self.table_structure_model = self.create_model(table_structure_config)
  88. if not self.use_common_ocr:
  89. common_ocr_config = config["SubPipelines"]["OCR"]
  90. self.common_ocr_pipeline = self.create_pipeline(common_ocr_config)
  91. return
  92. def get_text_paragraphs_ocr_res(
  93. self, overall_ocr_res: OCRResult, layout_det_res: DetResult
  94. ) -> OCRResult:
  95. """
  96. Retrieves the OCR results for text paragraphs, excluding those of formulas, tables, and seals.
  97. Args:
  98. overall_ocr_res (OCRResult): The overall OCR result containing text information.
  99. layout_det_res (DetResult): The detection result containing the layout information of the document.
  100. Returns:
  101. OCRResult: The OCR result for text paragraphs after excluding formulas, tables, and seals.
  102. """
  103. object_boxes = []
  104. for box_info in layout_det_res["boxes"]:
  105. if box_info["label"].lower() in ["formula", "table", "seal"]:
  106. object_boxes.append(box_info["coordinate"])
  107. object_boxes = np.array(object_boxes)
  108. return get_sub_regions_ocr_res(overall_ocr_res, object_boxes, flag_within=False)
  109. def check_input_params_valid(self, input_params: Dict) -> bool:
  110. """
  111. Check if the input parameters are valid based on the initialized models.
  112. Args:
  113. input_params (Dict): A dictionary containing input parameters.
  114. Returns:
  115. bool: True if all required models are initialized according to input parameters, False otherwise.
  116. """
  117. if input_params["use_doc_preprocessor"] and not self.use_doc_preprocessor:
  118. logging.error(
  119. "Set use_doc_preprocessor, but the models for doc preprocessor are not initialized."
  120. )
  121. return False
  122. if input_params["use_common_ocr"] and not self.use_common_ocr:
  123. logging.error(
  124. "Set use_common_ocr, but the models for common OCR are not initialized."
  125. )
  126. return False
  127. if input_params["use_seal_recognition"] and not self.use_seal_recognition:
  128. logging.error(
  129. "Set use_seal_recognition, but the models for seal recognition are not initialized."
  130. )
  131. return False
  132. if input_params["use_table_recognition"] and not self.use_table_recognition:
  133. logging.error(
  134. "Set use_table_recognition, but the models for table recognition are not initialized."
  135. )
  136. return False
  137. return True
  138. def predict(
  139. self,
  140. input: str | list[str] | np.ndarray | list[np.ndarray],
  141. use_doc_orientation_classify: bool = False,
  142. use_doc_unwarping: bool = False,
  143. use_common_ocr: bool = True,
  144. use_seal_recognition: bool = True,
  145. use_table_recognition: bool = True,
  146. **kwargs
  147. ) -> LayoutParsingResult:
  148. """
  149. This function predicts the layout parsing result for the given input.
  150. Args:
  151. input (str | list[str] | np.ndarray | list[np.ndarray]): The input image(s) to be processed.
  152. use_doc_orientation_classify (bool): Whether to use document orientation classification.
  153. use_doc_unwarping (bool): Whether to use document unwarping.
  154. use_common_ocr (bool): Whether to use common OCR.
  155. use_seal_recognition (bool): Whether to use seal recognition.
  156. use_table_recognition (bool): Whether to use table recognition.
  157. **kwargs: Additional keyword arguments.
  158. Returns:
  159. LayoutParsingResult: The predicted layout parsing result.
  160. """
  161. if not isinstance(input, list):
  162. input_list = [input]
  163. else:
  164. input_list = input
  165. input_params = {
  166. "use_doc_preprocessor": self.use_doc_preprocessor,
  167. "use_doc_orientation_classify": use_doc_orientation_classify,
  168. "use_doc_unwarping": use_doc_unwarping,
  169. "use_common_ocr": use_common_ocr,
  170. "use_seal_recognition": use_seal_recognition,
  171. "use_table_recognition": use_table_recognition,
  172. }
  173. if use_doc_orientation_classify or use_doc_unwarping:
  174. input_params["use_doc_preprocessor"] = True
  175. else:
  176. input_params["use_doc_preprocessor"] = False
  177. if not self.check_input_params_valid(input_params):
  178. yield {"error": "input params invalid"}
  179. img_id = 1
  180. for input in input_list:
  181. if isinstance(input, str):
  182. image_array = next(self.img_reader(input))[0]["img"]
  183. else:
  184. image_array = input
  185. assert len(image_array.shape) == 3
  186. if input_params["use_doc_preprocessor"]:
  187. doc_preprocessor_res = next(
  188. self.doc_preprocessor_pipeline(
  189. image_array,
  190. use_doc_orientation_classify=use_doc_orientation_classify,
  191. use_doc_unwarping=use_doc_unwarping,
  192. )
  193. )
  194. doc_preprocessor_image = doc_preprocessor_res["output_img"]
  195. doc_preprocessor_res["img_id"] = img_id
  196. else:
  197. doc_preprocessor_res = {}
  198. doc_preprocessor_image = image_array
  199. ########## [TODO]RT-DETR 检测结果有重复
  200. layout_det_res = next(self.layout_det_model(doc_preprocessor_image))
  201. if input_params["use_common_ocr"] or input_params["use_table_recognition"]:
  202. overall_ocr_res = next(self.common_ocr_pipeline(doc_preprocessor_image))
  203. overall_ocr_res["img_id"] = img_id
  204. dt_boxes = convert_points_to_boxes(overall_ocr_res["dt_polys"])
  205. overall_ocr_res["dt_boxes"] = dt_boxes
  206. else:
  207. overall_ocr_res = {}
  208. text_paragraphs_ocr_res = {}
  209. if input_params["use_common_ocr"]:
  210. text_paragraphs_ocr_res = self.get_text_paragraphs_ocr_res(
  211. overall_ocr_res, layout_det_res
  212. )
  213. text_paragraphs_ocr_res["img_id"] = img_id
  214. table_res_list = []
  215. if input_params["use_table_recognition"]:
  216. table_region_id = 1
  217. for box_info in layout_det_res["boxes"]:
  218. if box_info["label"].lower() in ["table"]:
  219. crop_img_info = self._crop_by_boxes(
  220. doc_preprocessor_image, [box_info]
  221. )
  222. crop_img_info = crop_img_info[0]
  223. table_structure_pred = next(
  224. self.table_structure_model(crop_img_info["img"])
  225. )
  226. table_recognition_res = get_table_recognition_res(
  227. crop_img_info, table_structure_pred, overall_ocr_res
  228. )
  229. table_recognition_res["table_region_id"] = table_region_id
  230. table_region_id += 1
  231. table_res_list.append(table_recognition_res)
  232. seal_res_list = []
  233. if input_params["use_seal_recognition"]:
  234. seal_region_id = 1
  235. for box_info in layout_det_res["boxes"]:
  236. if box_info["label"].lower() in ["seal"]:
  237. crop_img_info = self._crop_by_boxes(
  238. doc_preprocessor_image, [box_info]
  239. )
  240. crop_img_info = crop_img_info[0]
  241. seal_ocr_res = next(
  242. self.seal_ocr_pipeline(crop_img_info["img"])
  243. )
  244. seal_ocr_res["seal_region_id"] = seal_region_id
  245. seal_region_id += 1
  246. seal_res_list.append(seal_ocr_res)
  247. single_img_res = {
  248. "layout_det_res": layout_det_res,
  249. "doc_preprocessor_res": doc_preprocessor_res,
  250. "text_paragraphs_ocr_res": text_paragraphs_ocr_res,
  251. "table_res_list": table_res_list,
  252. "seal_res_list": seal_res_list,
  253. "input_params": input_params,
  254. }
  255. yield LayoutParsingResult(single_img_res)