pipeline.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, Iterator, Optional

import numpy as np

from ...common.reader import ReadImage
from ...common.batch_sampler import ImageBatchSampler
from ...utils.pp_option import PaddlePredictorOption
from ..base import BasePipeline
from ..components import CropByPolys, SortQuadBoxes, SortPolyBoxes
from .result import OCRResult


class OCRPipeline(BasePipeline):
    """OCR Pipeline"""

    entities = "OCR"

    def __init__(
        self,
        config: Dict,
        device: Optional[str] = None,
        pp_option: Optional[PaddlePredictorOption] = None,
        use_hpip: bool = False,
        hpi_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        """
        Initializes the pipeline with the given configurations and options.

        Args:
            config (Dict): Configuration dictionary containing model and other parameters.
            device (Optional[str]): The device to run the prediction on. Defaults to None.
            pp_option (Optional[PaddlePredictorOption]): Options for the PaddlePaddle predictor. Defaults to None.
            use_hpip (bool): Whether to use high-performance inference (HPIP) for prediction. Defaults to False.
            hpi_params (Optional[Dict[str, Any]]): HPIP-specific parameters. Defaults to None.
        """
        super().__init__(
            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
        )
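
        # The config is expected to provide at least the keys read below.
        # Sketch of the structure (the sub-module contents and model names
        # come from the pipeline's own config, not from this module):
        #   {
        #       "SubModules": {
        #           "TextDetection": {...},    # text detection model config
        #           "TextRecognition": {...},  # text recognition model config
        #       },
        #       "text_type": "general",        # or "seal"
        #   }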
        text_det_model_config = config["SubModules"]["TextDetection"]
        self.text_det_model = self.create_model(text_det_model_config)

        text_rec_model_config = config["SubModules"]["TextRecognition"]
        self.text_rec_model = self.create_model(text_rec_model_config)

        self.text_type = config["text_type"]
        if self.text_type == "general":
            # General text: detection results are quadrilateral boxes.
            self._sort_boxes = SortQuadBoxes()
            self._crop_by_polys = CropByPolys(det_box_type="quad")
        elif self.text_type == "seal":
            # Seal text: detection results are arbitrary polygons.
            self._sort_boxes = SortPolyBoxes()
            self._crop_by_polys = CropByPolys(det_box_type="poly")
        else:
            raise ValueError("Unsupported text type {}".format(self.text_type))

        self.batch_sampler = ImageBatchSampler(batch_size=1)
        self.img_reader = ReadImage(format="BGR")

    def predict(
        self, input: str | list[str] | np.ndarray | list[np.ndarray], **kwargs
    ) -> Iterator[OCRResult]:
        """Predicts OCR results for the given input.

        Args:
            input (str | list[str] | np.ndarray | list[np.ndarray]): The input image(s), path(s) to image(s), or PDF(s).
            **kwargs: Additional keyword arguments that can be passed to the function.

        Yields:
            OCRResult: One result per input image, containing the predicted text and other relevant information.
        """
        for img_id, batch_data in enumerate(self.batch_sampler(input)):
            raw_img = self.img_reader(batch_data)[0]

            # Run text detection on the full image.
            det_res = next(self.text_det_model(raw_img))
            dt_polys = det_res["dt_polys"]
            dt_scores = det_res["dt_scores"]
            # TODO: Confirm filtering thresholds for the detection and recognition modules.

            dt_polys = self._sort_boxes(dt_polys)

            img_id += 1
            single_img_res = {
                "input_img": raw_img,
                "dt_polys": dt_polys,
                "img_id": img_id,
                "text_type": self.text_type,
            }

            single_img_res["rec_text"] = []
            single_img_res["rec_score"] = []
            if len(dt_polys) > 0:
                # Crop each detected text region and run recognition on the crops.
                all_subs_of_img = list(self._crop_by_polys(raw_img, dt_polys))
                for rec_res in self.text_rec_model(all_subs_of_img):
                    single_img_res["rec_text"].append(rec_res["rec_text"])
                    single_img_res["rec_score"].append(rec_res["rec_score"])

            yield OCRResult(single_img_res)
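

# ---------------------------------------------------------------------------
# Usage sketch (not part of the pipeline module): how this class is typically
# driven once a pipeline config has been assembled. The sub-module contents
# and model names below are hypothetical placeholders; in practice the
# dictionary is built from the pipeline's YAML config rather than by hand.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Hypothetical config; only the keys read by OCRPipeline.__init__ are shown,
    # and the "model_name" entries are assumed placeholders.
    example_config = {
        "SubModules": {
            "TextDetection": {"model_name": "PP-OCRv4_mobile_det"},
            "TextRecognition": {"model_name": "PP-OCRv4_mobile_rec"},
        },
        "text_type": "general",
    }

    pipeline = OCRPipeline(config=example_config)
    # predict() is a generator: it yields one OCRResult per input image.
    for res in pipeline.predict("example.png"):
        # Assuming OCRResult exposes the fields set above via item access.
        print(res["rec_text"], res["rec_score"])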