# pipeline.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional

import numpy as np

from ..base import BasePipeline
from ..components import CropByPolys, SortPolyBoxes, SortQuadBoxes
from .result import OCRResult

# TODO: the following paths need to be updated later
from ...components.transforms import ReadImage
from ...utils.pp_option import PaddlePredictorOption
  22. class OCRPipeline(BasePipeline):
  23. """OCR Pipeline"""
  24. entities = "OCR"
  25. def __init__(
  26. self,
  27. config: Dict,
  28. device: str = None,
  29. pp_option: PaddlePredictorOption = None,
  30. use_hpip: bool = False,
  31. hpi_params: Optional[Dict[str, Any]] = None,
  32. ) -> None:
  33. """
  34. Initializes the class with given configurations and options.
  35. Args:
  36. config (Dict): Configuration dictionary containing model and other parameters.
  37. device (str): The device to run the prediction on. Default is None.
  38. pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
  39. use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
  40. hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
  41. """
  42. super().__init__(
  43. device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
  44. )
  45. text_det_model_config = config["SubModules"]["TextDetection"]
  46. self.text_det_model = self.create_model(text_det_model_config)
  47. text_rec_model_config = config["SubModules"]["TextRecognition"]
  48. self.text_rec_model = self.create_model(text_rec_model_config)
  49. self.text_type = config["text_type"]
  50. if self.text_type == "general":
  51. self._sort_boxes = SortQuadBoxes()
  52. self._crop_by_polys = CropByPolys(det_box_type="quad")
  53. elif self.text_type == "seal":
  54. self._sort_boxes = SortPolyBoxes()
  55. self._crop_by_polys = CropByPolys(det_box_type="poly")
  56. else:
  57. raise ValueError("Unsupported text type {}".format(self.text_type))
  58. self.img_reader = ReadImage(format="BGR")
  59. def predict(
  60. self, input: str | list[str] | np.ndarray | list[np.ndarray], **kwargs
  61. ) -> OCRResult:
  62. """Predicts OCR results for the given input.
  63. Args:
  64. input (str | list[str] | np.ndarray | list[np.ndarray]): The input image(s) or path(s) to the images.
  65. **kwargs: Additional keyword arguments that can be passed to the function.
  66. Returns:
  67. OCRResult: An iterable of OCRResult objects, each containing the predicted text and other relevant information.
  68. """
  69. if not isinstance(input, list):
  70. input_list = [input]
  71. else:
  72. input_list = input
  73. img_id = 1
  74. for input in input_list:
  75. if isinstance(input, str):
  76. image_array = next(self.img_reader(input))[0]["img"]
  77. else:
  78. image_array = input
  79. assert len(image_array.shape) == 3
  80. det_res = next(self.text_det_model(image_array))
  81. dt_polys = det_res["dt_polys"]
  82. dt_scores = det_res["dt_scores"]
  83. ########## [TODO] Need to confirm filtering thresholds for detection and recognition modules
  84. dt_polys = self._sort_boxes(dt_polys)
  85. single_img_res = {
  86. "input_img": image_array,
  87. "dt_polys": dt_polys,
  88. "img_id": img_id,
  89. "text_type": self.text_type,
  90. }
  91. img_id += 1
  92. single_img_res["rec_text"] = []
  93. single_img_res["rec_score"] = []
  94. if len(dt_polys) > 0:
  95. all_subs_of_img = list(self._crop_by_polys(image_array, dt_polys))
  96. ########## [TODO] Update in the future
  97. for sub_img in all_subs_of_img:
  98. sub_img["input"] = sub_img["img"]
  99. ##########
  100. for rec_res in self.text_rec_model(all_subs_of_img):
  101. single_img_res["rec_text"].append(rec_res["rec_text"])
  102. single_img_res["rec_score"].append(rec_res["rec_score"])
  103. yield OCRResult(single_img_res)