mat.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "opencv2/core/core.hpp"
#include "ultra_infer/core/fd_tensor.h"
#include "ultra_infer/vision/common/processors/proc_lib.h"

#ifdef ENABLE_FLYCV
#include "flycv.h" // NOLINT
#endif

#ifdef WITH_GPU
#include <cuda_runtime_api.h>
#endif

namespace ultra_infer {
namespace vision {

enum Layout { HWC, CHW };
/*! @brief FDMat is a structure to replace cv::Mat
 */
struct ULTRAINFER_DECL Mat {
  Mat() = default;

  explicit Mat(const cv::Mat &mat) {
    cpu_mat = mat;
    layout = Layout::HWC;
    height = cpu_mat.rows;
    width = cpu_mat.cols;
    channels = cpu_mat.channels();
    mat_type = ProcLib::OPENCV;
  }
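
  // Usage sketch (illustrative, not part of the original header): wrapping an
  // image decoded by OpenCV. "test.jpg" is a hypothetical path; cv::Mat copies
  // are shallow, so no pixel data is duplicated here.
  //
  //   cv::Mat image = cv::imread("test.jpg");  // HWC, BGR, 8-bit
  //   ultra_infer::vision::Mat mat(image);     // layout = HWC, ProcLib::OPENCV
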
#ifdef ENABLE_FLYCV
  explicit Mat(const fcv::Mat &mat) {
    fcv_mat = mat;
    layout = Layout::HWC;
    height = fcv_mat.height();
    width = fcv_mat.width();
    channels = fcv_mat.channels();
    mat_type = ProcLib::FLYCV;
  }
#endif
  Mat(const Mat &mat) = default;
  Mat &operator=(const Mat &mat) = default;

  // Move constructor
  Mat(Mat &&other) = default;

  // Be careful when using this interface: it is only intended for the case
  // where you do not want to modify the original data, but instead write the
  // result to a new cv::Mat and then replace the cv::Mat held by this
  // structure.
  void SetMat(const cv::Mat &mat) {
    cpu_mat = mat;
    mat_type = ProcLib::OPENCV;
  }

  cv::Mat *GetOpenCVMat();
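
  // Usage sketch (illustrative, not part of the original header): the pattern
  // the comment above describes, i.e. writing the result into a new cv::Mat
  // and swapping it in rather than modifying the original data. cv::resize
  // requires <opencv2/imgproc.hpp>, which this header does not include.
  //
  //   cv::Mat *src = mat.GetOpenCVMat();
  //   cv::Mat dst;
  //   cv::resize(*src, dst, cv::Size(224, 224));  // write to a new cv::Mat
  //   mat.SetMat(dst);                            // replace the held cv::Mat
  //   mat.SetWidth(dst.cols);
  //   mat.SetHeight(dst.rows);
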
#ifdef ENABLE_FLYCV
  void SetMat(const fcv::Mat &mat) {
    fcv_mat = mat;
    mat_type = ProcLib::FLYCV;
  }
  fcv::Mat *GetFlyCVMat();
#endif

  void *Data();

  // Get fd_tensor
  FDTensor *Tensor();

  // Set fd_tensor
  void SetTensor(FDTensor *tensor);
  void SetTensor(std::shared_ptr<FDTensor> &tensor);
 private:
  int channels;
  int height;
  int width;
  cv::Mat cpu_mat;

#ifdef ENABLE_FLYCV
  fcv::Mat fcv_mat;
#endif

#ifdef WITH_GPU
  cudaStream_t stream = nullptr;
#endif

  // Currently, fd_tensor is only used by CUDA and CV-CUDA;
  // OpenCV and FlyCV do not use it.
  std::shared_ptr<FDTensor> fd_tensor = std::make_shared<FDTensor>();
 public:
  FDDataType Type();
  int Channels() const { return channels; }
  int Width() const { return width; }
  int Height() const { return height; }
  void SetChannels(int s) { channels = s; }
  void SetWidth(int w) { width = w; }
  void SetHeight(int h) { height = h; }

  // When using CV-CUDA/CUDA, please set the input/output cache tensors;
  // refer to manager.cc.
  FDTensor *input_cache = nullptr;
  FDTensor *output_cache = nullptr;
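
  // Usage sketch (illustrative, not part of the original header): how the
  // cache members might be wired up for the CUDA/CV-CUDA path. The tensor
  // names are hypothetical; see manager.cc for the actual usage.
  //
  //   FDTensor input_buf, output_buf;  // reused across images
  //   mat.input_cache = &input_buf;
  //   mat.output_cache = &output_buf;
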
#ifdef WITH_GPU
  cudaStream_t Stream() const { return stream; }
  void SetStream(cudaStream_t s) { stream = s; }
#endif

  // Transfer the vision::Mat to an FDTensor
  void ShareWithTensor(FDTensor *tensor);

  // Only copying to a CPU tensor is supported for now
  bool CopyToTensor(FDTensor *tensor);
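
  // Usage sketch (illustrative, not part of the original header): handing the
  // processed image to an FDTensor for inference. Judging by its name,
  // ShareWithTensor shares the underlying buffer, while CopyToTensor performs
  // a copy (CPU tensors only, per the comment above).
  //
  //   FDTensor shared, copied;
  //   mat.ShareWithTensor(&shared);
  //   bool ok = mat.CopyToTensor(&copied);
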
  // Debug functions
  // TODO(jiangjiajun) Building a correct processing pipeline in C++ is not
  // easy; more debug functions will be added here to help inspect processed
  // images. This function prints the shape and the mean of each channel of
  // the Mat.
  void PrintInfo(const std::string &flag);
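
  // Usage sketch (illustrative, not part of the original header): the flag
  // appears to be a label printed alongside the statistics, e.g. to mark a
  // pipeline stage.
  //
  //   mat.PrintInfo("after_resize");
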
  ProcLib mat_type = ProcLib::OPENCV;
  Layout layout = Layout::HWC;
  Device device = Device::CPU;
  ProcLib proc_lib = ProcLib::DEFAULT;

  // Create an FDMat from an FDTensor. This method only creates a new FDMat
  // with zero copy: its data pointer references the original memory buffer of
  // the input FDTensor. Be careful: any operation on this Mat may modify the
  // memory owned by the FDTensor; we assume the memory the Mat points to is
  // mutable. The FDMat is created according to the current global default
  // ProcLib (OPENCV, FLYCV, ...).
  static Mat Create(const FDTensor &tensor);
  static Mat Create(const FDTensor &tensor, ProcLib lib);
  static Mat Create(int height, int width, int channels, FDDataType type,
                    void *data);
  static Mat Create(int height, int width, int channels, FDDataType type,
                    void *data, ProcLib lib);
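
  // Usage sketch (illustrative, not part of the original header): wrapping an
  // existing HWC buffer without copying it. The buffer and its shape are
  // hypothetical, and FDDataType::UINT8 is assumed to be the enumerator for
  // 8-bit unsigned data; the caller keeps ownership of the memory.
  //
  //   std::vector<uint8_t> buffer(480 * 640 * 3);
  //   Mat mat = Mat::Create(480, 640, 3, FDDataType::UINT8, buffer.data());
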
};

typedef Mat FDMat;

/*
 * @brief Wrap a cv::Mat into an FDMat; there is no memory copy, and the memory
 * buffer is managed by the user.
 */
ULTRAINFER_DECL FDMat WrapMat(const cv::Mat &image);

/*
 * @brief Wrap a vector<cv::Mat> into a vector<FDMat>; there is no memory copy,
 * and the memory buffer is managed by the user.
 */
ULTRAINFER_DECL std::vector<FDMat> WrapMat(const std::vector<cv::Mat> &images);

bool CheckShapeConsistency(std::vector<Mat> *mats);
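
// Usage sketch (illustrative, not part of the original header): wrapping a
// batch of decoded images before preprocessing. The image paths are
// hypothetical, and the cv::Mat buffers must stay alive while the FDMats are
// in use, since no memory is copied.
//
//   std::vector<cv::Mat> images = {cv::imread("a.jpg"), cv::imread("b.jpg")};
//   std::vector<FDMat> mats = WrapMat(images);
//   CheckShapeConsistency(&mats);  // optional: verify the shapes match
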
// Create an input tensor on the GPU and save it into input_cache.
// If the Mat is already on the GPU, return mat->Tensor() directly.
// If the Mat is on the CPU, update the input cache tensor and copy the Mat's
// CPU tensor to this new GPU input cache tensor.
FDTensor *CreateCachedGpuInputTensor(Mat *mat);
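
// Usage sketch (illustrative, not part of the original header): feeding a Mat
// to a GPU processor. This assumes a WITH_GPU build and that mat.input_cache
// already points at a reusable FDTensor, as described above.
//
//   FDTensor *gpu_input = CreateCachedGpuInputTensor(&mat);
//   // gpu_input now refers to the image data in GPU memory (or to the Mat's
//   // own tensor if it was already on the GPU).
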
} // namespace vision
} // namespace ultra_infer