manager_pybind.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ultra_infer/pybind/main.h"
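
// Python bindings for vision::ProcessorManager, the base class that drives
// the vision preprocessing pipeline.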
namespace ultra_infer {
namespace vision {
// PyProcessorManager is a pybind11 trampoline class used for
// pybind11::init() of ProcessorManager, because ProcessorManager has a pure
// virtual function Apply().
class ULTRAINFER_DECL PyProcessorManager : public ProcessorManager {
public:
  using ProcessorManager::ProcessorManager;
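  // Forward Apply() to the Python override; pybind11 raises an error if the
  // Python subclass does not provide one.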
  bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs) override {
    PYBIND11_OVERRIDE_PURE(bool, ProcessorManager, Apply, image_batch,
                           outputs);
  }
};
} // namespace vision
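
// Registers ProcessorManager with the Python module. The PyProcessorManager
// trampoline above lets Python subclasses override the pure virtual Apply().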
void BindProcessorManager(pybind11::module &m) {
  pybind11::class_<vision::ProcessorManager, vision::PyProcessorManager>(
      m, "ProcessorManager")
      .def(pybind11::init<>())
      .def("run",
           [](vision::ProcessorManager &self,
              std::vector<pybind11::array> &im_list) {
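             // Wrap each incoming numpy array as an FDMat (via cv::Mat) so
             // the preprocessing pipeline can consume it.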
             std::vector<vision::FDMat> images;
             for (size_t i = 0; i < im_list.size(); ++i) {
               images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
             }
             std::vector<FDTensor> outputs;
             if (!self.Run(&images, &outputs)) {
               throw std::runtime_error("Failed to process the input data");
             }
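             // Outputs produced on the CPU may share memory with internal
             // buffers; StopSharing() makes each tensor own its data so it
             // remains valid after being returned to Python.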
             if (!self.CudaUsed()) {
               for (size_t i = 0; i < outputs.size(); ++i) {
                 outputs[i].StopSharing();
               }
             }
             return outputs;
           })
      .def("pre_apply", &vision::ProcessorManager::PreApply)
      .def("post_apply", &vision::ProcessorManager::PostApply)
  51. .def("use_cuda",
  52. [](vision::ProcessorManager &self, bool enable_cv_cuda = false,
  53. int gpu_id = -1) { self.UseCuda(enable_cv_cuda, gpu_id); });
  54. }
  55. } // namespace ultra_infer