# pipeline.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. from typing import Any, Dict, Optional
  15. import os, sys
  16. import numpy as np
  17. import cv2
  18. from ..base import BasePipeline
  19. from .utils import get_sub_regions_ocr_res
  20. from ..components import convert_points_to_boxes
  21. from .result import LayoutParsingResult
  22. from ....utils import logging
  23. from ...utils.pp_option import PaddlePredictorOption
  24. from ...common.reader import ReadImage
  25. from ...common.batch_sampler import ImageBatchSampler
  26. from ..ocr.result import OCRResult
  27. from ..doc_preprocessor.result import DocPreprocessorResult
# TODO: update models_new to models
  29. from ...models_new.object_detection.result import DetResult
  30. class LayoutParsingPipeline(BasePipeline):
  31. """Layout Parsing Pipeline"""
  32. entities = ["layout_parsing"]
  33. def __init__(
  34. self,
  35. config: Dict,
  36. device: str = None,
  37. pp_option: PaddlePredictorOption = None,
  38. use_hpip: bool = False,
  39. hpi_params: Optional[Dict[str, Any]] = None,
  40. ) -> None:
  41. """Initializes the layout parsing pipeline.
  42. Args:
  43. config (Dict): Configuration dictionary containing various settings.
  44. device (str, optional): Device to run the predictions on. Defaults to None.
  45. pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
  46. use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
  47. hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
  48. """
  49. super().__init__(
  50. device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
  51. )
  52. self.inintial_predictor(config)
  53. self.batch_sampler = ImageBatchSampler(batch_size=1)
  54. self.img_reader = ReadImage(format="BGR")
  55. def set_used_models_flag(self, config: Dict) -> None:
  56. """
  57. Set the flags for which models to use based on the configuration.
  58. Args:
  59. config (Dict): A dictionary containing configuration settings.
  60. Returns:
  61. None
  62. """
  63. pipeline_name = config["pipeline_name"]
  64. self.pipeline_name = pipeline_name
  65. self.use_doc_preprocessor = False
  66. self.use_general_ocr = False
  67. self.use_seal_recognition = False
  68. self.use_table_recognition = False
  69. if "use_doc_preprocessor" in config:
  70. self.use_doc_preprocessor = config["use_doc_preprocessor"]
  71. if "use_general_ocr" in config:
  72. self.use_general_ocr = config["use_general_ocr"]
  73. if "use_seal_recognition" in config:
  74. self.use_seal_recognition = config["use_seal_recognition"]
  75. if "use_table_recognition" in config:
  76. self.use_table_recognition = config["use_table_recognition"]
  77. def inintial_predictor(self, config: Dict) -> None:
  78. """Initializes the predictor based on the provided configuration.
  79. Args:
  80. config (Dict): A dictionary containing the configuration for the predictor.
  81. Returns:
  82. None
  83. """
  84. self.set_used_models_flag(config)
  85. layout_det_config = config["SubModules"]["LayoutDetection"]
  86. self.layout_det_model = self.create_model(layout_det_config)
  87. if self.use_doc_preprocessor:
  88. doc_preprocessor_config = config["SubPipelines"]["DocPreprocessor"]
  89. self.doc_preprocessor_pipeline = self.create_pipeline(
  90. doc_preprocessor_config
  91. )
  92. if self.use_general_ocr or self.use_table_recognition:
  93. general_ocr_config = config["SubPipelines"]["GeneralOCR"]
  94. self.general_ocr_pipeline = self.create_pipeline(general_ocr_config)
  95. if self.use_seal_recognition:
  96. seal_recognition_config = config["SubPipelines"]["SealRecognition"]
  97. self.seal_recognition_pipeline = self.create_pipeline(
  98. seal_recognition_config
  99. )
  100. if self.use_table_recognition:
  101. table_recognition_config = config["SubPipelines"]["TableRecognition"]
  102. self.table_recognition_pipeline = self.create_pipeline(
  103. table_recognition_config
  104. )
  105. return
  106. def get_text_paragraphs_ocr_res(
  107. self, overall_ocr_res: OCRResult, layout_det_res: DetResult
  108. ) -> OCRResult:
  109. """
  110. Retrieves the OCR results for text paragraphs, excluding those of formulas, tables, and seals.
  111. Args:
  112. overall_ocr_res (OCRResult): The overall OCR result containing text information.
  113. layout_det_res (DetResult): The detection result containing the layout information of the document.
  114. Returns:
  115. OCRResult: The OCR result for text paragraphs after excluding formulas, tables, and seals.
  116. """
  117. object_boxes = []
  118. for box_info in layout_det_res["boxes"]:
  119. if box_info["label"].lower() in ["formula", "table", "seal"]:
  120. object_boxes.append(box_info["coordinate"])
  121. object_boxes = np.array(object_boxes)
  122. return get_sub_regions_ocr_res(overall_ocr_res, object_boxes, flag_within=False)
  123. def check_input_params_valid(self, input_params: Dict) -> bool:
  124. """
  125. Check if the input parameters are valid based on the initialized models.
  126. Args:
  127. input_params (Dict): A dictionary containing input parameters.
  128. Returns:
  129. bool: True if all required models are initialized according to input parameters, False otherwise.
  130. """
  131. if input_params["use_doc_preprocessor"] and not self.use_doc_preprocessor:
  132. logging.error(
  133. "Set use_doc_preprocessor, but the models for doc preprocessor are not initialized."
  134. )
  135. return False
  136. if input_params["use_general_ocr"] and not self.use_general_ocr:
  137. logging.error(
  138. "Set use_general_ocr, but the models for general OCR are not initialized."
  139. )
  140. return False
  141. if input_params["use_seal_recognition"] and not self.use_seal_recognition:
  142. logging.error(
  143. "Set use_seal_recognition, but the models for seal recognition are not initialized."
  144. )
  145. return False
  146. if input_params["use_table_recognition"] and not self.use_table_recognition:
  147. logging.error(
  148. "Set use_table_recognition, but the models for table recognition are not initialized."
  149. )
  150. return False
  151. return True
  152. def predict_doc_preprocessor_res(
  153. self, image_array: np.ndarray, input_params: dict
  154. ) -> tuple[DocPreprocessorResult, np.ndarray]:
  155. """
  156. Preprocess the document image based on input parameters.
  157. Args:
  158. image_array (np.ndarray): The input image array.
  159. input_params (dict): Dictionary containing preprocessing parameters.
  160. Returns:
  161. tuple[DocPreprocessorResult, np.ndarray]: A tuple containing the preprocessing
  162. result dictionary and the processed image array.
  163. """
  164. if input_params["use_doc_preprocessor"]:
  165. use_doc_orientation_classify = input_params["use_doc_orientation_classify"]
  166. use_doc_unwarping = input_params["use_doc_unwarping"]
  167. doc_preprocessor_res = next(
  168. self.doc_preprocessor_pipeline(
  169. image_array,
  170. use_doc_orientation_classify=use_doc_orientation_classify,
  171. use_doc_unwarping=use_doc_unwarping,
  172. )
  173. )
  174. doc_preprocessor_image = doc_preprocessor_res["output_img"]
  175. else:
  176. doc_preprocessor_res = {}
  177. doc_preprocessor_image = image_array
  178. return doc_preprocessor_res, doc_preprocessor_image
  179. def predict_overall_ocr_res(self, image_array: np.ndarray) -> OCRResult:
  180. """
  181. Predict the overall OCR result for the given image array.
  182. Args:
  183. image_array (np.ndarray): The input image array to perform OCR on.
  184. Returns:
  185. OCRResult: The predicted OCR result with updated dt_boxes.
  186. """
  187. overall_ocr_res = next(self.general_ocr_pipeline(image_array))
  188. dt_boxes = convert_points_to_boxes(overall_ocr_res["dt_polys"])
  189. overall_ocr_res["dt_boxes"] = dt_boxes
  190. return overall_ocr_res
  191. def predict(
  192. self,
  193. input: str | list[str] | np.ndarray | list[np.ndarray],
  194. use_doc_orientation_classify: bool = False,
  195. use_doc_unwarping: bool = False,
  196. use_general_ocr: bool = True,
  197. use_seal_recognition: bool = True,
  198. use_table_recognition: bool = True,
  199. **kwargs
  200. ) -> LayoutParsingResult:
  201. """
  202. This function predicts the layout parsing result for the given input.
  203. Args:
  204. input (str | list[str] | np.ndarray | list[np.ndarray]): The input image(s) or pdf(s) to be processed.
  205. use_doc_orientation_classify (bool): Whether to use document orientation classification.
  206. use_doc_unwarping (bool): Whether to use document unwarping.
  207. use_general_ocr (bool): Whether to use general OCR.
  208. use_seal_recognition (bool): Whether to use seal recognition.
  209. use_table_recognition (bool): Whether to use table recognition.
  210. **kwargs: Additional keyword arguments.
  211. Returns:
  212. LayoutParsingResult: The predicted layout parsing result.
  213. """
  214. input_params = {
  215. "use_doc_preprocessor": self.use_doc_preprocessor,
  216. "use_doc_orientation_classify": use_doc_orientation_classify,
  217. "use_doc_unwarping": use_doc_unwarping,
  218. "use_general_ocr": use_general_ocr,
  219. "use_seal_recognition": use_seal_recognition,
  220. "use_table_recognition": use_table_recognition,
  221. }
  222. if use_doc_orientation_classify or use_doc_unwarping:
  223. input_params["use_doc_preprocessor"] = True
  224. else:
  225. input_params["use_doc_preprocessor"] = False
  226. if not self.check_input_params_valid(input_params):
  227. yield None
  228. for img_id, batch_data in enumerate(self.batch_sampler(input)):
  229. image_array = self.img_reader(batch_data)[0]
  230. img_id += 1
  231. doc_preprocessor_res, doc_preprocessor_image = (
  232. self.predict_doc_preprocessor_res(image_array, input_params)
  233. )
  234. layout_det_res = next(self.layout_det_model(doc_preprocessor_image))
  235. if input_params["use_general_ocr"] or input_params["use_table_recognition"]:
  236. overall_ocr_res = self.predict_overall_ocr_res(doc_preprocessor_image)
  237. else:
  238. overall_ocr_res = {}
  239. if input_params["use_general_ocr"]:
  240. text_paragraphs_ocr_res = self.get_text_paragraphs_ocr_res(
  241. overall_ocr_res, layout_det_res
  242. )
  243. else:
  244. text_paragraphs_ocr_res = {}
  245. if input_params["use_table_recognition"]:
  246. table_res_list = next(
  247. self.table_recognition_pipeline(
  248. doc_preprocessor_image,
  249. use_layout_detection=False,
  250. use_doc_orientation_classify=False,
  251. use_doc_unwarping=False,
  252. overall_ocr_res=overall_ocr_res,
  253. layout_det_res=layout_det_res,
  254. )
  255. )
  256. table_res_list = table_res_list["table_res_list"]
  257. else:
  258. table_res_list = []
  259. if input_params["use_seal_recognition"]:
  260. seal_res_list = next(
  261. self.seal_recognition_pipeline(
  262. doc_preprocessor_image,
  263. use_layout_detection=False,
  264. use_doc_orientation_classify=False,
  265. use_doc_unwarping=False,
  266. layout_det_res=layout_det_res,
  267. )
  268. )
  269. seal_res_list = seal_res_list["seal_res_list"]
  270. else:
  271. seal_res_list = []
  272. single_img_res = {
  273. "layout_det_res": layout_det_res,
  274. "doc_preprocessor_res": doc_preprocessor_res,
  275. "overall_ocr_res": overall_ocr_res,
  276. "text_paragraphs_ocr_res": text_paragraphs_ocr_res,
  277. "table_res_list": table_res_list,
  278. "seal_res_list": seal_res_list,
  279. "input_params": input_params,
  280. "img_id": img_id,
  281. }
  282. yield LayoutParsingResult(single_img_res)