predictor.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import shutil
import tempfile
from typing import Any, Dict, Iterator, List, Tuple

from ....modules.m_3d_bev_detection.model_list import MODELS
from ....utils import logging
from ....utils.func_register import FuncRegister
from ...common.batch_sampler import Det3DBatchSampler
from ...common.reader import ReadNuscenesData
from ..base import BasePredictor
from ..base.predictor.base_predictor import PredictionWrap
from .processors import (
    GetInferInput,
    LoadMultiViewImageFromFiles,
    LoadPointsFromFile,
    LoadPointsFromMultiSweeps,
    NormalizeImage,
    PadImage,
    ResizeImage,
    SampleFilterByKey,
)
from .result import BEV3DDetResult


class BEVDet3DPredictor(BasePredictor):
    """BEVDet3DPredictor that inherits from BasePredictor."""

    entities = MODELS

    _FUNC_MAP = {}
    register = FuncRegister(_FUNC_MAP)
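
    # The @register("<TransformName>") decorators on the build_* methods below
    # populate _FUNC_MAP, so _build() can look up a builder by the transform
    # name that appears under "PreProcess.transform_ops" in the model config.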

    def __init__(self, *args: List, **kwargs: Dict) -> None:
        """Initializes BEVDet3DPredictor.

        Args:
            *args: Arbitrary positional arguments passed to the superclass.
            **kwargs: Arbitrary keyword arguments passed to the superclass.
        """
        self.temp_dir = tempfile.mkdtemp()
        logging.info(
            f"Inference data will be stored in temporary directory {self.temp_dir}"
        )
        super().__init__(*args, **kwargs)
        self.pre_tfs, self.infer = self._build()

    def _build_batch_sampler(self) -> Det3DBatchSampler:
        """Builds and returns a Det3DBatchSampler instance.

        Returns:
            Det3DBatchSampler: An instance of Det3DBatchSampler.
        """
        return Det3DBatchSampler(temp_dir=self.temp_dir)

    def _get_result_class(self) -> type:
        """Returns the result class, BEV3DDetResult.

        Returns:
            type: The BEV3DDetResult class.
        """
        return BEV3DDetResult

    def _build(self) -> Tuple:
        """Builds the preprocessors and inference engine based on the configuration.

        Returns:
            tuple: A tuple containing the preprocessors and the inference engine.
        """
        import paddle

        if paddle.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm():
            from ....ops.iou3d_nms import nms_gpu  # noqa: F401
            from ....ops.voxelize import hard_voxelize  # noqa: F401
        else:
            logging.error("3D BEVFusion custom ops only support the GPU platform!")

        pre_tfs = {"Read": ReadNuscenesData()}
        for cfg in self.config["PreProcess"]["transform_ops"]:
            tf_key = list(cfg.keys())[0]
            func = self._FUNC_MAP[tf_key]
            args = cfg.get(tf_key, {})
            name, op = func(self, **args) if args else func(self)
            if op:
                pre_tfs[name] = op
        pre_tfs["GetInferInput"] = GetInferInput()

        infer = self.create_static_infer()
        return pre_tfs, infer
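
    # A minimal sketch of the config section consumed by _build(); the exact
    # values shown are an assumption, but per the loop above each entry under
    # "transform_ops" is a single-key dict whose key selects a builder from
    # _FUNC_MAP and whose value holds that builder's keyword arguments:
    #
    #   PreProcess:
    #     transform_ops:
    #       - LoadPointsFromFile: {load_dim: 6, use_dim: [0, 1, 2]}
    #       - LoadPointsFromMultiSweeps: {sweeps_num: 10, use_dim: [0, 1, 2, 4]}
    #       - NormalizeImage: {mean: [0.0, 0.0, 0.0], std: [1.0, 1.0, 1.0], to_rgb: true}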

    def _format_output(
        self, infer_input: List[Any], outs: List[Any], img_metas: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Formats the inference input and output into a prediction result.

        Args:
            infer_input (List): Model inference inputs: a list containing the images, points, and lidar2img matrices.
            outs (List): Model inference outputs containing the bboxes, scores, and labels.
            img_metas (Dict): Image meta info of the input sample.

        Returns:
            Dict: A dict containing the formatted inference output results.
        """
        input_lidar_path = img_metas["input_lidar_path"]
        input_img_paths = img_metas["input_img_paths"]
        sample_id = img_metas["sample_id"]

        results = {}
        out_bboxes_3d = []
        out_scores_3d = []
        out_labels_3d = []
        input_imgs = []
        input_points = []
        input_lidar2imgs = []
        input_ids = []
        input_lidar_path_list = []
        input_img_paths_list = []
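
        # Only the detection outputs, sample ids, and input paths are attached
        # to `results` below; the image, point, and lidar2img buffers are
        # filled in but not included in the returned dict.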
        out_bboxes_3d.append(outs[0])
        out_labels_3d.append(outs[1])
        out_scores_3d.append(outs[2])
        input_imgs.append(infer_input[1])
        input_points.append(infer_input[0])
        input_lidar2imgs.append(infer_input[2])
        input_ids.append(sample_id)
        input_lidar_path_list.append(input_lidar_path)
        input_img_paths_list.append(input_img_paths)

        results["input_path"] = input_lidar_path_list
        results["input_img_paths"] = input_img_paths_list
        results["sample_id"] = input_ids
        results["boxes_3d"] = out_bboxes_3d
        results["labels_3d"] = out_labels_3d
        results["scores_3d"] = out_scores_3d
        return results

    def process(self, batch_data: List[str]) -> Dict[str, Any]:
        """Processes a batch of data through preprocessing and inference.

        Args:
            batch_data (List[str]): A batch of input data (e.g., sample annotation file paths).

        Returns:
            dict: A dictionary containing the input lidar path, input image paths, sample id,
                output bboxes, output labels, and output scores. Keys include 'input_path',
                'input_img_paths', 'sample_id', 'boxes_3d', 'labels_3d', and 'scores_3d'.
        """
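        # The preprocessing order is fixed here; the config's "transform_ops"
        # only supply each op's arguments (via the registered builders), not
        # the order in which the ops run.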
        sample = self.pre_tfs["Read"](batch_data=batch_data)
        sample = self.pre_tfs["LoadPointsFromFile"](results=sample[0])
        sample = self.pre_tfs["LoadPointsFromMultiSweeps"](results=sample)
        sample = self.pre_tfs["LoadMultiViewImageFromFiles"](sample=sample)
        sample = self.pre_tfs["ResizeImage"](results=sample)
        sample = self.pre_tfs["NormalizeImage"](results=sample)
        sample = self.pre_tfs["PadImage"](results=sample)
        sample = self.pre_tfs["SampleFilterByKey"](sample=sample)
        infer_input, img_metas = self.pre_tfs["GetInferInput"](sample=sample)
        infer_output = self.infer(x=infer_input)
        results = self._format_output(infer_input, infer_output, img_metas)
        return results

    @register("LoadPointsFromFile")
    def build_load_points_from_file(
        self, load_dim=6, use_dim=[0, 1, 2], shift_height=False, use_color=False
    ):
        return "LoadPointsFromFile", LoadPointsFromFile(
            load_dim=load_dim,
            use_dim=use_dim,
            shift_height=shift_height,
            use_color=use_color,
        )

    @register("LoadPointsFromMultiSweeps")
    def build_load_points_from_multi_sweeps(
        self,
        sweeps_num=10,
        load_dim=5,
        use_dim=[0, 1, 2, 4],
        pad_empty_sweeps=False,
        remove_close=False,
        test_mode=False,
        point_cloud_angle_range=None,
    ):
        return "LoadPointsFromMultiSweeps", LoadPointsFromMultiSweeps(
            sweeps_num=sweeps_num,
            load_dim=load_dim,
            use_dim=use_dim,
            pad_empty_sweeps=pad_empty_sweeps,
            remove_close=remove_close,
            test_mode=test_mode,
            point_cloud_angle_range=point_cloud_angle_range,
        )

    @register("LoadMultiViewImageFromFiles")
    def build_load_multi_view_image_from_files(
        self,
        to_float32=False,
        project_pts_to_img_depth=False,
        cam_depth_range=[4.0, 45.0, 1.0],
        constant_std=0.5,
        imread_flag=-1,
    ):
        return "LoadMultiViewImageFromFiles", LoadMultiViewImageFromFiles(
            to_float32=to_float32,
            project_pts_to_img_depth=project_pts_to_img_depth,
            cam_depth_range=cam_depth_range,
            constant_std=constant_std,
            imread_flag=imread_flag,
        )

    @register("ResizeImage")
    def build_resize_image(
        self,
        img_scale=None,
        multiscale_mode="range",
        ratio_range=None,
        keep_ratio=True,
        bbox_clip_border=True,
        backend="cv2",
        override=False,
    ):
        return "ResizeImage", ResizeImage(
            img_scale=img_scale,
            multiscale_mode=multiscale_mode,
            ratio_range=ratio_range,
            keep_ratio=keep_ratio,
            bbox_clip_border=bbox_clip_border,
            backend=backend,
            override=override,
        )

    @register("NormalizeImage")
    def build_normalize_image(self, mean, std, to_rgb=True):
        return "NormalizeImage", NormalizeImage(mean=mean, std=std, to_rgb=to_rgb)

    @register("PadImage")
    def build_pad_image(self, size=None, size_divisor=None, pad_val=0):
        return "PadImage", PadImage(
            size=size, size_divisor=size_divisor, pad_val=pad_val
        )

    @register("SampleFilterByKey")
    def build_sample_filter_by_key(
        self,
        keys,
        meta_keys=(
            "filename",
            "ori_shape",
            "img_shape",
            "lidar2img",
            "depth2img",
            "cam2img",
            "pad_shape",
            "scale_factor",
            "flip",
            "pcd_horizontal_flip",
            "pcd_vertical_flip",
            "box_type_3d",
            "img_norm_cfg",
            "pcd_trans",
            "sample_idx",
            "pcd_scale_factor",
            "pcd_rotation",
            "pts_filename",
            "transformation_3d_flow",
        ),
    ):
        return "SampleFilterByKey", SampleFilterByKey(keys=keys, meta_keys=meta_keys)

    @register("GetInferInput")
    def build_get_infer_input(self):
        return "GetInferInput", GetInferInput()

    def apply(self, input: Any, **kwargs) -> Iterator[Any]:
        """Predicts on the input data and yields the prediction results.

        Args:
            input (Any): The input data to be predicted.

        Yields:
            Iterator[Any]: An iterator yielding prediction results.
        """
        try:
            for batch_data in self.batch_sampler(input):
                prediction = self.process(batch_data, **kwargs)
                prediction = PredictionWrap(prediction, len(batch_data))
                for idx in range(len(batch_data)):
                    yield self.result_class(prediction.get_by_idx(idx))
        finally:
            shutil.rmtree(self.temp_dir)
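

# Illustrative usage sketch (kept as a comment, not executed): the constructor
# arguments shown are an assumption, since BasePredictor's signature is defined
# elsewhere in the package.
#
#     predictor = BEVDet3DPredictor(model_dir="path/to/inference_model", device="gpu")
#     for result in predictor.apply("path/to/sample_annotation_file"):
#         # Each `result` is a BEV3DDetResult built from the keys produced by
#         # _format_output: 'input_path', 'input_img_paths', 'sample_id',
#         # 'boxes_3d', 'labels_3d', 'scores_3d'.
#         ...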