predictor.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Union, Dict, List, Tuple, Iterator
import shutil
import tempfile
from importlib import import_module

import lazy_paddle

from ....utils import logging
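
# The BEVFusion custom ops (point-cloud voxelization and 3D NMS) are compiled
# only for CUDA builds of Paddle; CPU and ROCm builds cannot run this predictor.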
if lazy_paddle.is_compiled_with_cuda() and not lazy_paddle.is_compiled_with_rocm():
    from ....ops.voxelize import hard_voxelize
    from ....ops.iou3d_nms import nms_gpu
else:
    logging.error("3D BEVFusion custom ops only support GPU platform!")

from ....utils.func_register import FuncRegister
from ....utils.cache import TEMP_DIR
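
# ``3d_bev_detection`` starts with a digit, so it cannot be referenced by a plain
# ``import`` statement and is loaded dynamically instead.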
module_3d_bev_detection = import_module(".3d_bev_detection", "paddlex.modules")
module_3d_model_list = getattr(module_3d_bev_detection, "model_list")
MODELS = getattr(module_3d_model_list, "MODELS")

from ...common.batch_sampler import Det3DBatchSampler
from ...common.reader import ReadNuscenesData
from ..common import StaticInfer
from ..base import BasicPredictor
from ..base.predictor.base_predictor import PredictionWrap
from .processors import (
    LoadPointsFromFile,
    LoadPointsFromMultiSweeps,
    LoadMultiViewImageFromFiles,
    ResizeImage,
    NormalizeImage,
    PadImage,
    SampleFilterByKey,
    GetInferInput,
)
from .result import BEV3DDetResult


class BEVDet3DPredictor(BasicPredictor):
    """BEVDet3DPredictor that inherits from BasicPredictor."""

    entities = MODELS

    _FUNC_MAP = {}
    register = FuncRegister(_FUNC_MAP)
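
    # Each ``@register("<Name>")`` method below builds the preprocessing op of the
    # same name; ``_build()`` looks the builders up by the keys that appear in the
    # model config's ``PreProcess.transform_ops`` section.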

    def __init__(self, *args: List, **kwargs: Dict) -> None:
        """Initializes BEVDet3DPredictor.

        Args:
            *args: Arbitrary positional arguments passed to the superclass.
            **kwargs: Arbitrary keyword arguments passed to the superclass.
        """
        self.temp_dir = tempfile.mkdtemp(dir=TEMP_DIR)
        logging.info(
            f"infer data will be stored in temporary directory {self.temp_dir}"
        )
        super().__init__(*args, **kwargs)
        self.pre_tfs, self.infer = self._build()

    def _build_batch_sampler(self) -> Det3DBatchSampler:
        """Builds and returns a Det3DBatchSampler instance.

        Returns:
            Det3DBatchSampler: An instance of Det3DBatchSampler.
        """
        return Det3DBatchSampler(temp_dir=self.temp_dir)

    def _get_result_class(self) -> type:
        """Returns the result class, BEV3DDetResult.

        Returns:
            type: The BEV3DDetResult class.
        """
        return BEV3DDetResult

    def _build(self) -> Tuple:
        """Build the preprocessors and inference engine based on the configuration.

        Returns:
            tuple: A tuple containing the preprocessors and inference engine.
        """
        pre_tfs = {"Read": ReadNuscenesData()}
        for cfg in self.config["PreProcess"]["transform_ops"]:
            tf_key = list(cfg.keys())[0]
            func = self._FUNC_MAP[tf_key]
            args = cfg.get(tf_key, {})
            name, op = func(self, **args) if args else func(self)
            if op:
                pre_tfs[name] = op
        infer = StaticInfer(
            model_dir=self.model_dir,
            model_prefix=self.MODEL_FILE_PREFIX,
            option=self.pp_option,
        )
        return pre_tfs, infer

    def _format_output(
        self, infer_input: List[Any], outs: List[Any], img_metas: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Format the inference input and output into the predict result.

        Args:
            infer_input (List): Model infer inputs: a list containing images, points and the lidar2img matrix.
            outs (List): Model infer outputs containing the bboxes, labels and scores.
            img_metas (Dict): Image meta info of the input sample.

        Returns:
            Dict: A dict containing the formatted inference output results.
        """
        input_lidar_path = img_metas["input_lidar_path"]
        input_img_paths = img_metas["input_img_paths"]
        sample_id = img_metas["sample_id"]
        results = {}
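        # A single sample is processed per call, so each field is wrapped in a
        # length-1 list to keep per-sample indexing in ``PredictionWrap`` uniform.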
        out_bboxes_3d = []
        out_scores_3d = []
        out_labels_3d = []
        input_imgs = []
        input_points = []
        input_lidar2imgs = []
        input_ids = []
        input_lidar_path_list = []
        input_img_paths_list = []
        out_bboxes_3d.append(outs[0])
        out_labels_3d.append(outs[1])
        out_scores_3d.append(outs[2])
        input_imgs.append(infer_input[1])
        input_points.append(infer_input[0])
        input_lidar2imgs.append(infer_input[2])
        input_ids.append(sample_id)
        input_lidar_path_list.append(input_lidar_path)
        input_img_paths_list.append(input_img_paths)
        results["input_path"] = input_lidar_path_list
        results["input_img_paths"] = input_img_paths_list
        results["sample_id"] = input_ids
        results["boxes_3d"] = out_bboxes_3d
        results["labels_3d"] = out_labels_3d
        results["scores_3d"] = out_scores_3d
        return results

    def process(self, batch_data: List[str]) -> Dict[str, Any]:
        """
        Process a batch of data through preprocessing and inference.

        Args:
            batch_data (List[str]): A batch of input data (e.g., sample anno file paths).

        Returns:
            dict: A dictionary containing the input lidar path, input image paths, sample id,
                output bboxes, output labels and output scores. Keys are 'input_path',
                'input_img_paths', 'sample_id', 'boxes_3d', 'labels_3d' and 'scores_3d'.
        """
        sample = self.pre_tfs["Read"](batch_data=batch_data)
        sample = self.pre_tfs["LoadPointsFromFile"](results=sample[0])
        sample = self.pre_tfs["LoadPointsFromMultiSweeps"](results=sample)
        sample = self.pre_tfs["LoadMultiViewImageFromFiles"](sample=sample)
        sample = self.pre_tfs["ResizeImage"](results=sample)
        sample = self.pre_tfs["NormalizeImage"](results=sample)
        sample = self.pre_tfs["PadImage"](results=sample)
        sample = self.pre_tfs["SampleFilterByKey"](sample=sample)
        infer_input, img_metas = self.pre_tfs["GetInferInput"](sample=sample)
        infer_output = self.infer(x=infer_input)
        results = self._format_output(infer_input, infer_output, img_metas)
        return results
  154. @register("LoadPointsFromFile")
  155. def build_load_img_from_file(
  156. self, load_dim=6, use_dim=[0, 1, 2], shift_height=False, use_color=False
  157. ):
  158. return "LoadPointsFromFile", LoadPointsFromFile(
  159. load_dim=load_dim,
  160. use_dim=use_dim,
  161. shift_height=shift_height,
  162. use_color=use_color,
  163. )
  164. @register("LoadPointsFromMultiSweeps")
  165. def build_load_points_from_multi_sweeps(
  166. self,
  167. sweeps_num=10,
  168. load_dim=5,
  169. use_dim=[0, 1, 2, 4],
  170. pad_empty_sweeps=False,
  171. remove_close=False,
  172. test_mode=False,
  173. point_cloud_angle_range=None,
  174. ):
  175. return "LoadPointsFromMultiSweeps", LoadPointsFromMultiSweeps(
  176. sweeps_num=sweeps_num,
  177. load_dim=load_dim,
  178. use_dim=use_dim,
  179. pad_empty_sweeps=pad_empty_sweeps,
  180. remove_close=remove_close,
  181. test_mode=test_mode,
  182. point_cloud_angle_range=point_cloud_angle_range,
  183. )
  184. @register("LoadMultiViewImageFromFiles")
  185. def build_load_multi_view_image_from_files(
  186. self,
  187. to_float32=False,
  188. project_pts_to_img_depth=False,
  189. cam_depth_range=[4.0, 45.0, 1.0],
  190. constant_std=0.5,
  191. imread_flag=-1,
  192. ):
  193. return "LoadMultiViewImageFromFiles", LoadMultiViewImageFromFiles(
  194. to_float32=to_float32,
  195. project_pts_to_img_depth=project_pts_to_img_depth,
  196. cam_depth_range=cam_depth_range,
  197. constant_std=constant_std,
  198. imread_flag=imread_flag,
  199. )
  200. @register("ResizeImage")
  201. def build_resize_image(
  202. self,
  203. img_scale=None,
  204. multiscale_mode="range",
  205. ratio_range=None,
  206. keep_ratio=True,
  207. bbox_clip_border=True,
  208. backend="cv2",
  209. override=False,
  210. ):
  211. return "ResizeImage", ResizeImage(
  212. img_scale=img_scale,
  213. multiscale_mode=multiscale_mode,
  214. ratio_range=ratio_range,
  215. keep_ratio=keep_ratio,
  216. bbox_clip_border=bbox_clip_border,
  217. backend=backend,
  218. override=override,
  219. )
  220. @register("NormalizeImage")
  221. def build_normalize_image(self, mean, std, to_rgb=True):
  222. return "NormalizeImage", NormalizeImage(mean=mean, std=std, to_rgb=to_rgb)
  223. @register("PadImage")
  224. def build_pad_image(self, size=None, size_divisor=None, pad_val=0):
  225. return "PadImage", PadImage(
  226. size=size, size_divisor=size_divisor, pad_val=pad_val
  227. )
  228. @register("SampleFilterByKey")
  229. def build_sample_filter_by_key(
  230. self,
  231. keys,
  232. meta_keys=(
  233. "filename",
  234. "ori_shape",
  235. "img_shape",
  236. "lidar2img",
  237. "depth2img",
  238. "cam2img",
  239. "pad_shape",
  240. "scale_factor",
  241. "flip",
  242. "pcd_horizontal_flip",
  243. "pcd_vertical_flip",
  244. "box_type_3d",
  245. "img_norm_cfg",
  246. "pcd_trans",
  247. "sample_idx",
  248. "pcd_scale_factor",
  249. "pcd_rotation",
  250. "pts_filename",
  251. "transformation_3d_flow",
  252. ),
  253. ):
  254. return "SampleFilterByKey", SampleFilterByKey(keys=keys, meta_keys=meta_keys)
  255. @register("GetInferInput")
  256. def build_get_infer_input(self):
  257. return "GetInferInput", GetInferInput()

    def apply(self, input: Any, **kwargs) -> Iterator[Any]:
        """
        Predict on the input data and yield predictions.

        Args:
            input (Any): The input data to be predicted.

        Yields:
            Iterator[Any]: An iterator yielding prediction results.
        """
        try:
            for batch_data in self.batch_sampler(input):
                prediction = self.process(batch_data, **kwargs)
                prediction = PredictionWrap(prediction, len(batch_data))
                for idx in range(len(batch_data)):
                    yield self.result_class(prediction.get_by_idx(idx))
        finally:
            shutil.rmtree(self.temp_dir)
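

# Minimal usage sketch (illustrative; the exact constructor arguments and result
# handling come from ``BasicPredictor``/``BEV3DDetResult`` and may differ):
#
#     predictor = BEVDet3DPredictor(model_dir="path/to/bevfusion_inference_model")
#     for result in predictor.apply("path/to/nuscenes_sample_anno.pkl"):
#         print(result)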