iou3d_nms_api.cpp

// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <cuda.h>
#include <cuda_runtime_api.h>
#include <paddle/extension.h>

#include <vector>

#include "iou3d_cpu.h"
#include "iou3d_nms.h"
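
// Output dtype and shape inference callbacks for the custom operators
// registered with PD_BUILD_OP at the bottom of this file. Paddle uses these
// to deduce output metadata from the input tensors' dtypes and shapes.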
std::vector<paddle::DataType>
BoxesIouBevCpuInferDtype(paddle::DataType boxes_a_dtype,
                         paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>>
BoxesIouBevCpuInferShape(std::vector<int64_t> boxes_a_shape,
                         std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

std::vector<paddle::DataType> NmsInferDtype(paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT64, paddle::DataType::INT64};
}

std::vector<std::vector<int64_t>>
NmsInferShape(std::vector<int64_t> boxes_shape) {
  return {{boxes_shape[0]}, {1}};
}

std::vector<paddle::DataType>
NmsNormalInferDtype(paddle::DataType boxes_dtype) {
  return {paddle::DataType::INT64, paddle::DataType::INT64};
}

std::vector<std::vector<int64_t>>
NmsNormalInferShape(std::vector<int64_t> boxes_shape) {
  return {{boxes_shape[0]}, {1}};
}

std::vector<paddle::DataType>
BoxesIouBevGpuInferDtype(paddle::DataType boxes_a_dtype,
                         paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>>
BoxesIouBevGpuInferShape(std::vector<int64_t> boxes_a_shape,
                         std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

std::vector<paddle::DataType>
BoxesOverlapBevGpuInferDtype(paddle::DataType boxes_a_dtype,
                             paddle::DataType boxes_b_dtype) {
  return {boxes_a_dtype};
}

std::vector<std::vector<int64_t>>
BoxesOverlapBevGpuInferShape(std::vector<int64_t> boxes_a_shape,
                             std::vector<int64_t> boxes_b_shape) {
  return {{boxes_a_shape[0], boxes_b_shape[0]}};
}

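// Register each kernel as a Paddle custom operator, wiring together its
// inputs, outputs, attributes, kernel function, and the dtype/shape
// inference callbacks defined above. The IoU/overlap ops produce a
// [num_boxes_a, num_boxes_b] matrix with the same dtype as the input boxes.
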
PD_BUILD_OP(boxes_iou_bev_cpu)
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_cpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevCpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevCpuInferShape));

PD_BUILD_OP(boxes_iou_bev_gpu)
    .Inputs({"boxes_a_tensor", "boxes_b_tensor"})
    .Outputs({"ans_iou_tensor"})
    .SetKernelFn(PD_KERNEL(boxes_iou_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesIouBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesIouBevGpuInferShape));

PD_BUILD_OP(boxes_overlap_bev_gpu)
    .Inputs({"boxes_a", "boxes_b"})
    .Outputs({"ans_overlap"})
    .SetKernelFn(PD_KERNEL(boxes_overlap_bev_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(BoxesOverlapBevGpuInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(BoxesOverlapBevGpuInferShape));

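// Both NMS variants take an "nms_overlap_thresh" attribute and return two
// INT64 tensors: "keep" with the indices of retained boxes (shape
// [num_boxes]) and "num_to_keep" with the count of kept boxes (shape [1]).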
PD_BUILD_OP(nms_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsInferShape));

PD_BUILD_OP(nms_normal_gpu)
    .Inputs({"boxes"})
    .Outputs({"keep", "num_to_keep"})
    .Attrs({"nms_overlap_thresh: float"})
    .SetKernelFn(PD_KERNEL(nms_normal_gpu))
    .SetInferDtypeFn(PD_INFER_DTYPE(NmsNormalInferDtype))
    .SetInferShapeFn(PD_INFER_SHAPE(NmsNormalInferShape));
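
// Note: custom operators registered this way are typically compiled and
// exposed to Python via paddle.utils.cpp_extension (for example setup() with
// a CUDAExtension, or load()); the exact build configuration depends on the
// surrounding project and is not shown in this file.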