yolov5seg_pybind.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
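
// Python bindings for the YOLOv5 instance-segmentation model (YOLOv5Seg):
// exposes the preprocessor, the postprocessor, and the end-to-end model
// wrapper.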
#include "ultra_infer/pybind/main.h"

namespace ultra_infer {
void BindYOLOv5Seg(pybind11::module &m) {
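  // YOLOv5SegPreprocessor: "run" converts a list of numpy images to FDMat,
  // runs the preprocessing pipeline, and returns the output tensors together
  // with per-image resize/padding info (ims_info) consumed by the
  // postprocessor. Behavior is configurable via the properties bound below
  // (size, padding_value, is_scale_up, is_mini_pad, stride).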
  pybind11::class_<vision::detection::YOLOv5SegPreprocessor>(
      m, "YOLOv5SegPreprocessor")
      .def(pybind11::init<>())
      .def("run",
           [](vision::detection::YOLOv5SegPreprocessor &self,
              std::vector<pybind11::array> &im_list) {
             std::vector<vision::FDMat> images;
             for (size_t i = 0; i < im_list.size(); ++i) {
               images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
             }
             std::vector<FDTensor> outputs;
             std::vector<std::map<std::string, std::array<float, 2>>> ims_info;
             if (!self.Run(&images, &outputs, &ims_info)) {
               throw std::runtime_error("Failed to preprocess the input data "
                                        "in YOLOv5SegPreprocessor.");
             }
             // Make the returned tensors own their memory instead of sharing
             // the preprocessor's internal buffers.
             for (size_t i = 0; i < outputs.size(); ++i) {
               outputs[i].StopSharing();
             }
             return make_pair(outputs, ims_info);
           })
      .def_property("size", &vision::detection::YOLOv5SegPreprocessor::GetSize,
                    &vision::detection::YOLOv5SegPreprocessor::SetSize)
      .def_property("padding_value",
                    &vision::detection::YOLOv5SegPreprocessor::GetPaddingValue,
                    &vision::detection::YOLOv5SegPreprocessor::SetPaddingValue)
      .def_property("is_scale_up",
                    &vision::detection::YOLOv5SegPreprocessor::GetScaleUp,
                    &vision::detection::YOLOv5SegPreprocessor::SetScaleUp)
      .def_property("is_mini_pad",
                    &vision::detection::YOLOv5SegPreprocessor::GetMiniPad,
                    &vision::detection::YOLOv5SegPreprocessor::SetMiniPad)
      .def_property("stride",
                    &vision::detection::YOLOv5SegPreprocessor::GetStride,
                    &vision::detection::YOLOv5SegPreprocessor::SetStride);
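
  // YOLOv5SegPostprocessor: "run" is overloaded to accept either FDTensor
  // outputs or numpy arrays (shared without copying) and decodes them into
  // DetectionResult objects using the ims_info returned by the preprocessor.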
  pybind11::class_<vision::detection::YOLOv5SegPostprocessor>(
      m, "YOLOv5SegPostprocessor")
      .def(pybind11::init<>())
      .def("run",
           [](vision::detection::YOLOv5SegPostprocessor &self,
              std::vector<FDTensor> &inputs,
              const std::vector<std::map<std::string, std::array<float, 2>>>
                  &ims_info) {
             std::vector<vision::DetectionResult> results;
             if (!self.Run(inputs, &results, ims_info)) {
               throw std::runtime_error("Failed to postprocess the runtime "
                                        "result in YOLOv5SegPostprocessor.");
             }
             return results;
           })
      .def("run",
           [](vision::detection::YOLOv5SegPostprocessor &self,
              std::vector<pybind11::array> &input_array,
              const std::vector<std::map<std::string, std::array<float, 2>>>
                  &ims_info) {
             std::vector<vision::DetectionResult> results;
             std::vector<FDTensor> inputs;
             PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
             if (!self.Run(inputs, &results, ims_info)) {
               throw std::runtime_error("Failed to postprocess the runtime "
                                        "result in YOLOv5SegPostprocessor.");
             }
             return results;
           })
      .def_property(
          "conf_threshold",
          &vision::detection::YOLOv5SegPostprocessor::GetConfThreshold,
          &vision::detection::YOLOv5SegPostprocessor::SetConfThreshold)
      .def_property("nms_threshold",
                    &vision::detection::YOLOv5SegPostprocessor::GetNMSThreshold,
                    &vision::detection::YOLOv5SegPostprocessor::SetNMSThreshold)
      .def_property("multi_label",
                    &vision::detection::YOLOv5SegPostprocessor::GetMultiLabel,
                    &vision::detection::YOLOv5SegPostprocessor::SetMultiLabel);
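
  // YOLOv5Seg: end-to-end model wrapper with single-image "predict" and
  // batched "batch_predict", plus read-only access to its pre/postprocessor.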
  pybind11::class_<vision::detection::YOLOv5Seg, UltraInferModel>(m,
                                                                  "YOLOv5Seg")
      .def(pybind11::init<std::string, std::string, RuntimeOption,
                          ModelFormat>())
      .def("predict",
           [](vision::detection::YOLOv5Seg &self, pybind11::array &data) {
             auto mat = PyArrayToCvMat(data);
             vision::DetectionResult res;
             self.Predict(mat, &res);
             return res;
           })
      .def("batch_predict",
           [](vision::detection::YOLOv5Seg &self,
              std::vector<pybind11::array> &data) {
             std::vector<cv::Mat> images;
             for (size_t i = 0; i < data.size(); ++i) {
               images.push_back(PyArrayToCvMat(data[i]));
             }
             std::vector<vision::DetectionResult> results;
             self.BatchPredict(images, &results);
             return results;
           })
      .def_property_readonly("preprocessor",
                             &vision::detection::YOLOv5Seg::GetPreprocessor)
      .def_property_readonly("postprocessor",
                             &vision::detection::YOLOv5Seg::GetPostprocessor);
}
} // namespace ultra_infer
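
// A minimal Python usage sketch for the classes bound above. The import path
// and any wrapper-level constructor defaults depend on how the ultra_infer
// Python package wraps these bindings, so treat the module path below as an
// assumption; the raw binding constructor takes
// (model_file, params_file, RuntimeOption, ModelFormat).
//
//   import cv2
//   import ultra_infer as ui
//
//   model = ui.vision.detection.YOLOv5Seg("yolov5s-seg.onnx")  # hypothetical path
//   im = cv2.imread("test.jpg")
//   res = model.predict(im)            # single image -> DetectionResult
//   batch = model.batch_predict([im])  # list of images -> list of results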