predictor.py

# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Union, Dict, List, Tuple, Sequence, Optional

import numpy as np

from ....modules.instance_segmentation.model_list import MODELS
from ...common.batch_sampler import ImageBatchSampler
from ..object_detection.processors import (
    ReadImage,
    ToBatch,
)
from .processors import InstanceSegPostProcess
from ..object_detection import DetPredictor
from .result import InstanceSegResult
from ....utils import logging


class InstanceSegPredictor(DetPredictor):
    """InstanceSegPredictor that inherits from DetPredictor."""

    entities = MODELS

    def __init__(self, *args, threshold: Optional[float] = None, **kwargs):
        """Initializes InstanceSegPredictor.

        Args:
            *args: Arbitrary positional arguments passed to the superclass.
            threshold (Optional[float], optional): The threshold for filtering out
                low-confidence predictions. Defaults to None, in which case the
                default from the config file is used.
            **kwargs: Arbitrary keyword arguments passed to the superclass.
        """
        super().__init__(*args, **kwargs)
        self.model_names_only_supports_batchsize_of_one = {
            "SOLOv2",
            "PP-YOLOE_seg-S",
            "Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN",
            "Cascade-MaskRCNN-ResNet50-FPN",
        }
        if self.model_name in self.model_names_only_supports_batchsize_of_one:
            logging.warning(
                f"Instance segmentation models \"{', '.join(list(self.model_names_only_supports_batchsize_of_one))}\" only support prediction with a batch_size of one. "
                "If you set the predictor with a batch_size larger than one, no error will occur; however, it will actually run inference with a batch_size of one, "
                f"which will lead to a slower inference speed. You are now using {self.config['Global']['model_name']}."
            )
        self.threshold = threshold
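
    # Usage sketch (illustrative only): constructor arguments other than
    # `threshold` are forwarded to DetPredictor, so the keyword names below
    # (`model_name`, `model_dir`) are assumptions rather than a documented API.
    #
    #     predictor = InstanceSegPredictor(
    #         model_name="SOLOv2",            # triggers the batch_size-of-one warning above
    #         model_dir="path/to/inference",  # hypothetical model directory
    #         threshold=0.5,                  # overrides the default from the config file
    #     )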

    def _get_result_class(self) -> type:
        """Returns the result class, InstanceSegResult.

        Returns:
            type: The InstanceSegResult class.
        """
        return InstanceSegResult

    def _build(self) -> Tuple:
        """Build the preprocessors, inference engine, and postprocessors based on the configuration.

        Returns:
            tuple: A tuple containing the preprocessors, inference engine, and postprocessors.
        """
        # build preprocess ops
        pre_ops = [ReadImage(format="RGB")]
        for cfg in self.config["Preprocess"]:
            tf_key = cfg["type"]
            func = self._FUNC_MAP[tf_key]
            cfg.pop("type")
            args = cfg
            op = func(self, **args) if args else func(self)
            if op:
                pre_ops.append(op)
        pre_ops.append(self.build_to_batch())
        # build infer
        infer = self.create_static_infer()
        # build postprocess op
        post_op = self.build_postprocess()
        return pre_ops, infer, post_op
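
    # Sketch of the dispatch in `_build` above, using a hypothetical config
    # entry (the concrete op names and the contents of `_FUNC_MAP` come from
    # DetPredictor and are assumptions here):
    #
    #     cfg = {"type": "Resize", "target_size": [640, 640]}  # hypothetical entry
    #     func = self._FUNC_MAP["Resize"]           # look up the op builder by its "type"
    #     op = func(self, target_size=[640, 640])   # remaining keys become kwargs
    #     pre_ops.append(op)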

    def build_to_batch(self):
        ordered_required_keys = (
            "img_size",
            "img",
            "scale_factors",
        )
        return ToBatch(ordered_required_keys=ordered_required_keys)

    def process(self, batch_data: List[Any], threshold: Optional[float] = None):
        """
        Process a batch of data through preprocessing, inference, and postprocessing.

        Args:
            batch_data: A batch produced by the batch sampler; its `instances`
                attribute holds the input data (e.g., image file paths or arrays).
            threshold (Optional[float], optional): Confidence threshold for this call.
                Defaults to None, in which case `self.threshold` is used.

        Returns:
            dict: A dictionary containing the input path, page index, raw image, boxes,
                and masks for every instance of the batch. Keys are 'input_path',
                'page_index', 'input_img', 'boxes', and 'masks'.
        """
        datas = batch_data.instances
        # preprocess
        for pre_op in self.pre_ops[:-1]:
            datas = pre_op(datas)

        # use `ToBatch` to format batch inputs
        batch_inputs = self.pre_ops[-1](datas)

        # do infer
        if self.model_name in self.model_names_only_supports_batchsize_of_one:
            batch_preds = []
            for i in range(batch_inputs[0].shape[0]):
                batch_inputs_ = [
                    batch_input_[i][None, ...] for batch_input_ in batch_inputs
                ]
                batch_pred_ = self.infer(batch_inputs_)
                batch_preds.append(batch_pred_)
        else:
            batch_preds = self.infer(batch_inputs)

        # process a batch of predictions into a list of single-image results
        preds_list = self._format_output(batch_preds)

        # postprocess
        boxes_masks = self.post_op(
            preds_list, datas, threshold if threshold is not None else self.threshold
        )

        return {
            "input_path": batch_data.input_paths,
            "page_index": batch_data.page_indexes,
            "input_img": [data["ori_img"] for data in datas],
            "boxes": [result["boxes"] for result in boxes_masks],
            "masks": [result["masks"] for result in boxes_masks],
        }
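
    # Note on the per-sample loop above: indexing `batch_input_[i]` drops the
    # batch axis, and `[None, ...]` re-inserts it, so models restricted to a
    # batch_size of one still receive inputs shaped as a batch of one.
    # A minimal numpy illustration (shapes are hypothetical):
    #
    #     x = np.zeros((4, 3, 640, 640))       # a batch of 4 CHW images
    #     x[0].shape             # -> (3, 640, 640)
    #     x[0][None, ...].shape  # -> (1, 3, 640, 640)
    #
    # The `threshold` passed to `self.post_op` is the per-call value if given,
    # otherwise the value supplied to the constructor.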

    def _format_output(self, pred: Sequence[Any]) -> List[dict]:
        """
        Transform batch outputs into a list of single-image outputs.

        Args:
            pred (Sequence[Any]): The predictions to reformat. Two layouts are handled:
                - A list of per-image predictions (produced when inference runs with a
                  batch_size of one), where each element has 4 entries
                  ([boxes, class_ids, scores, masks], SOLOv2 output) or 3 entries
                  ([boxes, box_nums, masks], PP-YOLOE_seg-S output).
                - A flat [boxes, box_nums, masks] batch output, which is split per
                  image using box_nums (see the sketch at the end of this file).

        Returns:
            List[dict]: A list of dictionaries, each containing either 'class_id' and
                'masks' (for SOLOv2) or 'boxes' and 'masks' (for the other formats).
        """
        box_idx_start = 0
        pred_box = []

        if isinstance(pred[0], list) and len(pred[0]) == 4:
            # Adapt to SOLOv2, which only supports prediction with a batch_size of 1.
            pred_class_id = [[pred_[1], pred_[2]] for pred_ in pred]
            pred_mask = [pred_[3] for pred_ in pred]
            return [
                {
                    "class_id": np.array(pred_class_id[i]),
                    "masks": np.array(pred_mask[i]),
                }
                for i in range(len(pred_class_id))
            ]

        if isinstance(pred[0], list) and len(pred[0]) == 3:
            # Adapt to PP-YOLOE_seg-S, which only supports prediction with a batch_size of 1.
            return [
                {"boxes": np.array(pred[i][0]), "masks": np.array(pred[i][2])}
                for i in range(len(pred))
            ]

        pred_mask = []
        for idx in range(len(pred[1])):
            np_boxes_num = pred[1][idx]
            box_idx_end = box_idx_start + np_boxes_num
            np_boxes = pred[0][box_idx_start:box_idx_end]
            pred_box.append(np_boxes)
            np_masks = pred[2][box_idx_start:box_idx_end]
            pred_mask.append(np_masks)
            box_idx_start = box_idx_end

        return [
            {"boxes": np.array(pred_box[i]), "masks": np.array(pred_mask[i])}
            for i in range(len(pred_box))
        ]

    def build_postprocess(self):
        return InstanceSegPostProcess(
            threshold=self.config["draw_threshold"], labels=self.config["label_list"]
        )
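

if __name__ == "__main__":
    # Minimal, self-contained sketch (not part of the predictor): it mirrors how
    # the flat [boxes, box_nums, masks] layout handled by `_format_output` is
    # split per image. The arrays below are dummy stand-ins, not real model
    # output, and the 6-column box layout is an assumption for illustration.
    boxes = np.arange(5 * 6, dtype=np.float32).reshape(5, 6)  # 5 detections in total
    box_nums = np.array([2, 3])  # image 0 produced 2 boxes, image 1 produced 3
    masks = np.zeros((5, 32, 32), dtype=np.uint8)  # one mask per detection

    start = 0
    per_image = []
    for n in box_nums:
        end = start + n
        per_image.append({"boxes": boxes[start:end], "masks": masks[start:end]})
        start = end

    for i, res in enumerate(per_image):
        # Expected: (2, 6) / (2, 32, 32) for image 0 and (3, 6) / (3, 32, 32) for image 1.
        print(i, res["boxes"].shape, res["masks"].shape)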