predictor.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, List, Optional, Sequence, Tuple

import numpy as np

from ....modules.instance_segmentation.model_list import MODELS
from ....utils import logging
from ..object_detection import DetPredictor
from ..object_detection.processors import ReadImage, ToBatch
from .processors import InstanceSegPostProcess
from .result import InstanceSegResult

class InstanceSegPredictor(DetPredictor):
    """InstanceSegPredictor that inherits from DetPredictor."""

    entities = MODELS

    def __init__(self, *args, threshold: Optional[float] = None, **kwargs):
        """Initializes InstanceSegPredictor.

        Args:
            *args: Arbitrary positional arguments passed to the superclass.
            threshold (Optional[float], optional): The threshold for filtering out low-confidence predictions.
                Defaults to None, in which case the default from the config file is used.
            **kwargs: Arbitrary keyword arguments passed to the superclass.
        """
        super().__init__(*args, **kwargs)
        self.model_names_only_supports_batchsize_of_one = {
            "SOLOv2",
            "PP-YOLOE_seg-S",
            "Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN",
            "Cascade-MaskRCNN-ResNet50-FPN",
        }
        if self.model_name in self.model_names_only_supports_batchsize_of_one:
            logging.warning(
                f"Instance segmentation models \"{', '.join(self.model_names_only_supports_batchsize_of_one)}\" "
                "only support prediction with a batch size of one. Setting a larger batch size will not "
                "raise an error, but inference will actually run with a batch size of one, which leads to "
                f"slower inference. You are now using {self.config['Global']['model_name']}."
            )
        self.threshold = threshold

    def _get_result_class(self) -> type:
        """Returns the result class, InstanceSegResult.

        Returns:
            type: The InstanceSegResult class.
        """
        return InstanceSegResult

    def _build(self) -> Tuple:
        """Build the preprocessors, inference engine, and postprocessors based on the configuration.

        Returns:
            tuple: A tuple containing the preprocessors, inference engine, and postprocessors.
        """
        # build preprocess ops
        pre_ops = [ReadImage(format="RGB")]
        for cfg in self.config["Preprocess"]:
            tf_key = cfg["type"]
            func = self._FUNC_MAP[tf_key]
            cfg.pop("type")
            args = cfg
            op = func(self, **args) if args else func(self)
            if op:
                pre_ops.append(op)
        pre_ops.append(self.build_to_batch())

        # build infer
        infer = self.create_static_infer()

        # build postprocess op
        post_op = self.build_postprocess()

        return pre_ops, infer, post_op
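
    # Illustrative sketch (not taken from this repository): `self.config["Preprocess"]`
    # is expected to be a list of dicts whose "type" key names a builder in
    # `self._FUNC_MAP`, with the remaining keys passed as that builder's arguments,
    # e.g. something along these lines in the exported model's YAML config:
    #
    #     Preprocess:
    #       - type: Resize
    #         target_size: [640, 640]
    #       - type: NormalizeImage
    #         mean: [0.485, 0.456, 0.406]
    #         std: [0.229, 0.224, 0.225]
    #
    # The op names and parameter values above are assumptions for illustration only;
    # the actual entries come from the model's inference config file.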

    def build_to_batch(self):
        ordered_required_keys = (
            "img_size",
            "img",
            "scale_factors",
        )
        return ToBatch(ordered_required_keys=ordered_required_keys)

    def process(self, batch_data: List[Any], threshold: Optional[float] = None):
        """
        Process a batch of data through the preprocessing, inference, and postprocessing.

        Args:
            batch_data (List[Union[str, np.ndarray]]): A batch of input data (e.g., image file paths).
            threshold (Optional[float], optional): The threshold for filtering out low-confidence predictions.
                Defaults to None, in which case `self.threshold` is used.

        Returns:
            dict: A dictionary containing the input path, raw image, box and mask
                for every instance of the batch. Keys include 'input_path', 'input_img', 'boxes' and 'masks'.
        """
        datas = batch_data.instances
        # preprocess
        for pre_op in self.pre_ops[:-1]:
            datas = pre_op(datas)

        # use `ToBatch` to format batch inputs
        batch_inputs = self.pre_ops[-1](datas)

        # do infer
        if self.model_name in self.model_names_only_supports_batchsize_of_one:
            # these models only support a batch size of one, so run inference sample by sample
            batch_preds = []
            for i in range(batch_inputs[0].shape[0]):
                batch_inputs_ = [
                    batch_input_[i][None, ...] for batch_input_ in batch_inputs
                ]
                batch_pred_ = self.infer(batch_inputs_)
                batch_preds.append(batch_pred_)
        else:
            batch_preds = self.infer(batch_inputs)

        # process a batch of predictions into a list of single-image results
        preds_list = self._format_output(batch_preds)

        # postprocess
        boxes_masks = self.post_op(
            preds_list, datas, threshold if threshold is not None else self.threshold
        )

        return {
            "input_path": batch_data.input_paths,
            "page_index": batch_data.page_indexes,
            "input_img": [data["ori_img"] for data in datas],
            "boxes": [result["boxes"] for result in boxes_masks],
            "masks": [result["masks"] for result in boxes_masks],
        }

    def _format_output(self, pred: Sequence[Any]) -> List[dict]:
        """
        Transform batch outputs into a list of single-image outputs.

        Args:
            pred (Sequence[Any]): The input predictions.
                - When the per-sample predictions have 4 elements (`len(pred[0]) == 4`), they are expected
                  to be in the format [boxes, class_ids, scores, masks], compatible with SOLOv2 output.
                - When the per-sample predictions have 3 elements (`len(pred[0]) == 3`), they are expected
                  to be in the format [boxes, box_nums, masks], compatible with instance segmentation output.

        Returns:
            List[dict]: A list of dictionaries, each containing either 'class_id' and 'masks' (for SOLOv2),
                or 'boxes' and 'masks' (for instance segmentation), or just 'boxes' if no masks are provided.
        """
        box_idx_start = 0
        pred_box = []

        if isinstance(pred[0], list) and len(pred[0]) == 4:
            # Adapt to SOLOv2, which only supports prediction with a batch size of 1.
            pred_class_id = [[pred_[1], pred_[2]] for pred_ in pred]
            pred_mask = [pred_[3] for pred_ in pred]
            return [
                {
                    "class_id": np.array(pred_class_id[i]),
                    "masks": np.array(pred_mask[i]),
                }
                for i in range(len(pred_class_id))
            ]

        if isinstance(pred[0], list) and len(pred[0]) == 3:
            # Adapt to PP-YOLOE_seg-S, which only supports prediction with a batch size of 1.
            return [
                {"boxes": np.array(pred[i][0]), "masks": np.array(pred[i][2])}
                for i in range(len(pred))
            ]

        pred_mask = []
        for idx in range(len(pred[1])):
            np_boxes_num = pred[1][idx]
            box_idx_end = box_idx_start + np_boxes_num
            np_boxes = pred[0][box_idx_start:box_idx_end]
            pred_box.append(np_boxes)
            np_masks = pred[2][box_idx_start:box_idx_end]
            pred_mask.append(np_masks)
            box_idx_start = box_idx_end

        return [
            {"boxes": np.array(pred_box[i]), "masks": np.array(pred_mask[i])}
            for i in range(len(pred_box))
        ]
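
    # Illustrative shapes (assumed, for readability) of the layouts handled above:
    #   default path:        pred = [boxes (N, 6), box_nums (B,), masks (N, H, W)],
    #                        where N is the total instance count across the batch and
    #                        B is the batch size; boxes/masks are split per image via box_nums.
    #   SOLOv2 path:         each pred[i] = [boxes, class_ids, scores, masks] for one image.
    #   PP-YOLOE_seg-S path: each pred[i] = [boxes, box_nums, masks] for one image.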

    def build_postprocess(self):
        return InstanceSegPostProcess(
            threshold=self.config["draw_threshold"], labels=self.config["label_list"]
        )
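

# Minimal usage sketch (not part of this module). It assumes the predictor is obtained
# through PaddleX's `create_model` factory and that "Mask-RCNN-ResNet50-FPN" is an
# available instance segmentation model name; adjust both to your installation. A
# `threshold` can also be supplied at construction time, per `__init__` above.
#
#     from paddlex import create_model
#
#     model = create_model("Mask-RCNN-ResNet50-FPN")
#     for res in model.predict("path/to/image.jpg", batch_size=1):
#         res.print()                      # per-image boxes and masks
#         res.save_to_img("./output/")     # visualization of the instances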