# predictor.py
# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Sequence

import numpy as np

from ....modules.object_detection.model_list import MODELS
from ....utils.func_register import FuncRegister
from ...common.batch_sampler import ImageBatchSampler
from ..base import BasicPredictor
from ..common import StaticInfer
from .processors import (
    DetPad,
    DetPostProcess,
    Normalize,
    PadStride,
    ReadImage,
    Resize,
    ToBatch,
    ToCHWImage,
    WarpAffine,
)
from .result import DetResult
  33. class DetPredictor(BasicPredictor):
  34. entities = MODELS
  35. _FUNC_MAP = {}
  36. register = FuncRegister(_FUNC_MAP)
  37. def __init__(self, *args, **kwargs):
  38. super().__init__(*args, **kwargs)
  39. self.pre_ops, self.infer, self.post_op = self._build()
  40. def _build_batch_sampler(self):
  41. return ImageBatchSampler()
  42. def _get_result_class(self):
  43. return DetResult
  44. def _build(self):
  45. # build preprocess ops
  46. pre_ops = [ReadImage(format="RGB")]
  47. for cfg in self.config["Preprocess"]:
  48. tf_key = cfg["type"]
  49. func = self._FUNC_MAP[tf_key]
  50. cfg.pop("type")
  51. args = cfg
  52. op = func(self, **args) if args else func(self)
  53. if op:
  54. pre_ops.append(op)
  55. pre_ops.append(self.build_to_batch())
  56. # build infer
  57. infer = StaticInfer(
  58. model_dir=self.model_dir,
  59. model_prefix=self.MODEL_FILE_PREFIX,
  60. option=self.pp_option,
  61. )
  62. # build postprocess op
  63. post_op = self.build_postprocess()
  64. return pre_ops, infer, post_op
  65. def _format_output(self, pred: Sequence[Any]) -> List[dict]:
  66. """
  67. Transform batch outputs into a list of single image output.
  68. Args:
  69. pred (Sequence[Any]): The input predictions, which can be either a list of 3 or 4 elements.
  70. - When len(pred) == 4, it is expected to be in the format [boxes, class_ids, scores, masks],
  71. compatible with SOLOv2 output.
  72. - When len(pred) == 3, it is expected to be in the format [boxes, box_nums, masks],
  73. compatible with Instance Segmentation output.
  74. Returns:
  75. List[dict]: A list of dictionaries, each containing either 'class_id' and 'masks' (for SOLOv2),
  76. or 'boxes' and 'masks' (for Instance Segmentation), or just 'boxes' if no masks are provided.
  77. """
  78. box_idx_start = 0
  79. pred_box = []
  80. if len(pred) == 4:
  81. # Adapt to SOLOv2
  82. pred_class_id = []
  83. pred_mask = []
  84. pred_class_id.append([pred[1], pred[2]])
  85. pred_mask.append(pred[3])
  86. return [
  87. {
  88. "class_id": np.array(pred_class_id[i]),
  89. "masks": np.array(pred_mask[i]),
  90. }
  91. for i in range(len(pred_class_id))
  92. ]
  93. if len(pred) == 3:
  94. # Adapt to Instance Segmentation
  95. pred_mask = []
  96. for idx in range(len(pred[1])):
  97. np_boxes_num = pred[1][idx]
  98. box_idx_end = box_idx_start + np_boxes_num
  99. np_boxes = pred[0][box_idx_start:box_idx_end]
  100. pred_box.append(np_boxes)
  101. if len(pred) == 3:
  102. np_masks = pred[2][box_idx_start:box_idx_end]
  103. pred_mask.append(np_masks)
  104. box_idx_start = box_idx_end
  105. if len(pred) == 3:
  106. return [
  107. {"boxes": np.array(pred_box[i]), "masks": np.array(pred_mask[i])}
  108. for i in range(len(pred_box))
  109. ]
  110. else:
  111. return [{"boxes": np.array(res)} for res in pred_box]
  112. def process(self, batch_data: List[Any]):
  113. """
  114. Process a batch of data through the preprocessing, inference, and postprocessing.
  115. Args:
  116. batch_data (List[Union[str, np.ndarray], ...]): A batch of input data (e.g., image file paths).
  117. Returns:
  118. dict: A dictionary containing the input path, raw image, class IDs, scores, and label names
  119. for every instance of the batch. Keys include 'input_path', 'input_img', 'class_ids', 'scores', and 'label_names'.
  120. """
  121. datas = batch_data
  122. # preprocess
  123. for pre_op in self.pre_ops[:-1]:
  124. datas = pre_op(datas)
  125. # use `ToBatch` format batch inputs
  126. batch_inputs = self.pre_ops[-1](datas)
  127. # do infer
  128. batch_preds = self.infer(batch_inputs)
  129. # process a batch of predictions into a list of single image result
  130. preds_list = self._format_output(batch_preds)
  131. # postprocess
  132. boxes = self.post_op(preds_list, datas)
  133. return {
  134. "input_path": [data.get("img_path", None) for data in datas],
  135. "input_img": [data["ori_img"] for data in datas],
  136. "boxes": boxes,
  137. }
  138. @register("Resize")
  139. def build_resize(self, target_size, keep_ratio=False, interp=2):
  140. assert target_size
  141. if isinstance(interp, int):
  142. interp = {
  143. 0: "NEAREST",
  144. 1: "LINEAR",
  145. 2: "CUBIC",
  146. 3: "AREA",
  147. 4: "LANCZOS4",
  148. }[interp]
  149. op = Resize(target_size=target_size[::-1], keep_ratio=keep_ratio, interp=interp)
  150. return op
  151. @register("NormalizeImage")
  152. def build_normalize(
  153. self,
  154. norm_type=None,
  155. mean=[0.485, 0.456, 0.406],
  156. std=[0.229, 0.224, 0.225],
  157. is_scale=True,
  158. ):
  159. if is_scale:
  160. scale = 1.0 / 255.0
  161. else:
  162. scale = 1
  163. if not norm_type or norm_type == "none":
  164. norm_type = "mean_std"
  165. if norm_type != "mean_std":
  166. mean = 0
  167. std = 1
  168. return Normalize(scale=scale, mean=mean, std=std)
  169. @register("Permute")
  170. def build_to_chw(self):
  171. return ToCHWImage()
  172. @register("Pad")
  173. def build_pad(self, fill_value=None, size=None):
  174. if fill_value is None:
  175. fill_value = [127.5, 127.5, 127.5]
  176. if size is None:
  177. size = [3, 640, 640]
  178. return DetPad(size=size, fill_value=fill_value)
  179. @register("PadStride")
  180. def build_pad_stride(self, stride=32):
  181. return PadStride(stride=stride)
  182. @register("WarpAffine")
  183. def build_warp_affine(self, input_h=512, input_w=512, keep_res=True):
  184. return WarpAffine(input_h=input_h, input_w=input_w, keep_res=keep_res)
  185. def build_to_batch(self):
  186. model_names_required_imgsize = [
  187. "DETR",
  188. "RCNN",
  189. "YOLOv3",
  190. "CenterNet",
  191. "BlazeFace",
  192. "BlazeFace-FPN-SSH",
  193. ]
  194. if any(name in self.model_name for name in model_names_required_imgsize):
  195. ordered_required_keys = (
  196. "img_size",
  197. "img",
  198. "scale_factors",
  199. )
  200. else:
  201. ordered_required_keys = ("img", "scale_factors")
  202. return ToBatch(ordered_required_keys=ordered_required_keys)
  203. def build_postprocess(self):
  204. return DetPostProcess(
  205. threshold=self.config["draw_threshold"],
  206. labels=self.config["label_list"],
  207. layout_postprocess=self.config.get("layout_postprocess", False),
  208. )