# pipeline.py
# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from ..base import BasePipeline
  15. from typing import Any, Dict, Optional
  16. import numpy as np
  17. import cv2
  18. from ..components import CropByBoxes
  19. from .utils import convert_points_to_boxes, get_sub_regions_ocr_res
  20. from .table_recognition_post_processing import get_table_recognition_res
  21. from .result import LayoutParsingResult
########## [TODO] update this import path later
  23. from ...components.transforms import ReadImage
class LayoutParsingPipeline(BasePipeline):
    """Layout Parsing Pipeline"""

    entities = "layout_parsing"

    def __init__(
        self,
        config,
        device=None,
        pp_option=None,
        use_hpip: bool = False,
        hpi_params: Optional[Dict[str, Any]] = None,
    ):
        """Initialize the layout parsing pipeline.

        Args:
            config: pipeline config with "SubModules"/"SubPipelines" entries
                and optional "use_*" feature switches.
            device: inference device, forwarded to BasePipeline.
            pp_option: predictor option, forwarded to BasePipeline.
            use_hpip: whether to use the high-performance inference plugin.
            hpi_params: optional high-performance inference parameters.
        """
        super().__init__(
            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
        )
        # NOTE(review): "inintial" is a typo of "initial"; name kept as-is
        # because other code may call it.
        self.inintial_predictor(config)
        # Decode inputs to BGR, matching the OpenCV-based downstream models.
        self.img_reader = ReadImage(format="BGR")
        self._crop_by_boxes = CropByBoxes()
  41. def inintial_predictor(self, config):
  42. layout_det_config = config["SubModules"]["LayoutDetection"]
  43. self.layout_det_model = self.create_model(layout_det_config)
  44. self.use_doc_preprocessor = False
  45. if "use_doc_preprocessor" in config:
  46. self.use_doc_preprocessor = config["use_doc_preprocessor"]
  47. if self.use_doc_preprocessor:
  48. doc_preprocessor_config = config["SubPipelines"]["DocPreprocessor"]
  49. self.doc_preprocessor_pipeline = self.create_pipeline(
  50. doc_preprocessor_config
  51. )
  52. self.use_common_ocr = False
  53. if "use_common_ocr" in config:
  54. self.use_common_ocr = config["use_common_ocr"]
  55. if self.use_common_ocr:
  56. common_ocr_config = config["SubPipelines"]["CommonOCR"]
  57. self.common_ocr_pipeline = self.create_pipeline(common_ocr_config)
  58. self.use_seal_recognition = False
  59. if "use_seal_recognition" in config:
  60. self.use_seal_recognition = config["use_seal_recognition"]
  61. if self.use_seal_recognition:
  62. seal_ocr_config = config["SubPipelines"]["SealOCR"]
  63. self.seal_ocr_pipeline = self.create_pipeline(seal_ocr_config)
  64. self.use_table_recognition = False
  65. if "use_table_recognition" in config:
  66. self.use_table_recognition = config["use_table_recognition"]
  67. if self.use_table_recognition:
  68. table_structure_config = config["SubModules"]["TableStructurePredictor"]
  69. self.table_structure_model = self.create_model(table_structure_config)
  70. if not self.use_common_ocr:
  71. common_ocr_config = config["SubPipelines"]["OCR"]
  72. self.common_ocr_pipeline = self.create_pipeline(common_ocr_config)
  73. return
  74. def get_text_paragraphs_ocr_res(self, overall_ocr_res, layout_det_res):
  75. """get ocr res of the text paragraphs"""
  76. object_boxes = []
  77. for box_info in layout_det_res["boxes"]:
  78. if box_info["label"].lower() in ["image", "formula", "table", "seal"]:
  79. object_boxes.append(box_info["coordinate"])
  80. object_boxes = np.array(object_boxes)
  81. return get_sub_regions_ocr_res(overall_ocr_res, object_boxes, flag_within=False)
  82. def check_input_params(self, input_params):
  83. if input_params["use_doc_preprocessor"] and not self.use_doc_preprocessor:
  84. raise ValueError("The models for doc preprocessor are not initialized.")
  85. if input_params["use_common_ocr"] and not self.use_common_ocr:
  86. raise ValueError("The models for common OCR are not initialized.")
  87. if input_params["use_seal_recognition"] and not self.use_seal_recognition:
  88. raise ValueError("The models for seal recognition are not initialized.")
  89. if input_params["use_table_recognition"] and not self.use_table_recognition:
  90. raise ValueError("The models for table recognition are not initialized.")
  91. return
  92. def predict(
  93. self,
  94. input,
  95. use_doc_orientation_classify=True,
  96. use_doc_unwarping=True,
  97. use_common_ocr=True,
  98. use_seal_recognition=True,
  99. use_table_recognition=True,
  100. **kwargs
  101. ):
  102. if not isinstance(input, list):
  103. input_list = [input]
  104. else:
  105. input_list = input
  106. input_params = {
  107. "use_doc_preprocessor": self.use_doc_preprocessor,
  108. "use_doc_orientation_classify": use_doc_orientation_classify,
  109. "use_doc_unwarping": use_doc_unwarping,
  110. "use_common_ocr": use_common_ocr,
  111. "use_seal_recognition": use_seal_recognition,
  112. "use_table_recognition": use_table_recognition,
  113. }
  114. if use_doc_orientation_classify or use_doc_unwarping:
  115. input_params["use_doc_preprocessor"] = True
  116. self.check_input_params(input_params)
  117. img_id = 1
  118. for input in input_list:
  119. if isinstance(input, str):
  120. image_array = next(self.img_reader(input))[0]["img"]
  121. else:
  122. image_array = input
  123. assert len(image_array.shape) == 3
  124. if input_params["use_doc_preprocessor"]:
  125. doc_preprocessor_res = next(
  126. self.doc_preprocessor_pipeline(
  127. image_array,
  128. use_doc_orientation_classify=use_doc_orientation_classify,
  129. use_doc_unwarping=use_doc_unwarping,
  130. )
  131. )
  132. doc_preprocessor_image = doc_preprocessor_res["output_img"]
  133. doc_preprocessor_res["img_id"] = img_id
  134. else:
  135. doc_preprocessor_res = {}
  136. doc_preprocessor_image = image_array
  137. ########## [TODO]RT-DETR 检测结果有重复
  138. layout_det_res = next(self.layout_det_model(doc_preprocessor_image))
  139. if input_params["use_common_ocr"] or input_params["use_table_recognition"]:
  140. overall_ocr_res = next(self.common_ocr_pipeline(doc_preprocessor_image))
  141. overall_ocr_res["img_id"] = img_id
  142. dt_boxes = convert_points_to_boxes(overall_ocr_res["dt_polys"])
  143. overall_ocr_res["dt_boxes"] = dt_boxes
  144. else:
  145. overall_ocr_res = {}
  146. text_paragraphs_ocr_res = {}
  147. if input_params["use_common_ocr"]:
  148. text_paragraphs_ocr_res = self.get_text_paragraphs_ocr_res(
  149. overall_ocr_res, layout_det_res
  150. )
  151. text_paragraphs_ocr_res["img_id"] = img_id
  152. table_res_list = []
  153. if input_params["use_table_recognition"]:
  154. table_region_id = 1
  155. for box_info in layout_det_res["boxes"]:
  156. if box_info["label"].lower() in ["table"]:
  157. crop_img_info = self._crop_by_boxes(
  158. doc_preprocessor_image, [box_info]
  159. )
  160. crop_img_info = crop_img_info[0]
  161. table_structure_pred = next(
  162. self.table_structure_model(crop_img_info["img"])
  163. )
  164. table_recognition_res = get_table_recognition_res(
  165. crop_img_info, table_structure_pred, overall_ocr_res
  166. )
  167. table_recognition_res["table_region_id"] = table_region_id
  168. table_region_id += 1
  169. table_res_list.append(table_recognition_res)
  170. seal_res_list = []
  171. if input_params["use_seal_recognition"]:
  172. seal_region_id = 1
  173. for box_info in layout_det_res["boxes"]:
  174. if box_info["label"].lower() in ["seal"]:
  175. crop_img_info = self._crop_by_boxes(
  176. doc_preprocessor_image, [box_info]
  177. )
  178. crop_img_info = crop_img_info[0]
  179. seal_ocr_res = next(
  180. self.seal_ocr_pipeline(crop_img_info["img"])
  181. )
  182. seal_ocr_res["seal_region_id"] = seal_region_id
  183. seal_region_id += 1
  184. seal_res_list.append(seal_ocr_res)
  185. single_img_res = {
  186. "layout_det_res": layout_det_res,
  187. "doc_preprocessor_res": doc_preprocessor_res,
  188. "text_paragraphs_ocr_res": text_paragraphs_ocr_res,
  189. "table_res_list": table_res_list,
  190. "seal_res_list": seal_res_list,
  191. "input_params": input_params,
  192. }
  193. yield LayoutParsingResult(single_img_res)