image_decoder.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #include "ultra_infer/vision/common/image_decoder/image_decoder.h"
  15. #include "opencv2/imgcodecs.hpp"
  16. namespace ultra_infer {
  17. namespace vision {
  18. ImageDecoder::ImageDecoder(ImageDecoderLib lib) {
  19. if (lib == ImageDecoderLib::NVJPEG) {
  20. #ifdef ENABLE_NVJPEG
  21. nvjpeg::init_decoder(nvjpeg_params_);
  22. #endif
  23. }
  24. lib_ = lib;
  25. }
  26. ImageDecoder::~ImageDecoder() {
  27. if (lib_ == ImageDecoderLib::NVJPEG) {
  28. #ifdef ENABLE_NVJPEG
  29. nvjpeg::destroy_decoder(nvjpeg_params_);
  30. #endif
  31. }
  32. }
  33. bool ImageDecoder::Decode(const std::string &img_name, FDMat *mat) {
  34. std::vector<FDMat> mats(1);
  35. mats[0] = std::move(*mat);
  36. if (!BatchDecode({img_name}, &mats)) {
  37. return false;
  38. }
  39. *mat = std::move(mats[0]);
  40. return true;
  41. }
  42. bool ImageDecoder::BatchDecode(const std::vector<std::string> &img_names,
  43. std::vector<FDMat> *mats) {
  44. if (lib_ == ImageDecoderLib::OPENCV) {
  45. return ImplByOpenCV(img_names, mats);
  46. } else if (lib_ == ImageDecoderLib::NVJPEG) {
  47. return ImplByNvJpeg(img_names, mats);
  48. }
  49. return true;
  50. }
  51. bool ImageDecoder::ImplByOpenCV(const std::vector<std::string> &img_names,
  52. std::vector<FDMat> *mats) {
  53. for (size_t i = 0; i < img_names.size(); ++i) {
  54. cv::Mat im = cv::imread(img_names[i]);
  55. (*mats)[i].SetMat(im);
  56. (*mats)[i].layout = Layout::HWC;
  57. (*mats)[i].SetWidth(im.cols);
  58. (*mats)[i].SetHeight(im.rows);
  59. (*mats)[i].SetChannels(im.channels());
  60. }
  61. return true;
  62. }
// Decodes a batch of images on the GPU via nvJPEG, writing the decoded
// BGR interleaved pixels into each FDMat's pre-assigned output_cache
// tensor. Falls back to OpenCV for the whole batch if nvJPEG fails
// (e.g. non-JPEG inputs such as PNG). Requires ENABLE_NVJPEG at build
// time; otherwise this always aborts via FDASSERT.
bool ImageDecoder::ImplByNvJpeg(const std::vector<std::string> &img_names,
                                std::vector<FDMat> *mats) {
#ifdef ENABLE_NVJPEG
  nvjpeg_params_.batch_size = img_names.size();
  // Per-image output descriptors and decoded dimensions, filled in by
  // nvjpeg::process_images below.
  std::vector<nvjpegImage_t> output_imgs(nvjpeg_params_.batch_size);
  std::vector<int> widths(nvjpeg_params_.batch_size);
  std::vector<int> heights(nvjpeg_params_.batch_size);
  // TODO(wangxinyu): support other output format
  nvjpeg_params_.fmt = NVJPEG_OUTPUT_BGRI;
  // NOTE(review): out-parameter of process_images — presumably elapsed
  // decode time; the value is not consumed here. Confirm in nvjpeg
  // helper before removing.
  double total;
  // Use the CUDA stream of the first mat for the whole batch.
  nvjpeg_params_.stream = (*mats)[0].Stream();
  // Every mat must already have a device-side output tensor attached;
  // nvJPEG decodes directly into these buffers.
  std::vector<FDTensor *> output_buffers;
  for (size_t i = 0; i < mats->size(); ++i) {
    FDASSERT((*mats)[i].output_cache != nullptr,
             "The output_cache of FDMat was not set.");
    output_buffers.push_back((*mats)[i].output_cache);
  }
  // process_images returns nonzero on failure.
  if (nvjpeg::process_images(img_names, nvjpeg_params_, total, output_imgs,
                             output_buffers, widths, heights)) {
    // If nvJPEG decode failed, will fallback to OpenCV,
    // e.g. png format is not supported by nvJPEG
    FDWARNING << "nvJPEG decode failed, falling back to OpenCV for this batch"
              << std::endl;
    return ImplByOpenCV(img_names, mats);
  }
  // Success: mark each mat as CUDA-resident and bind the decoded tensor.
  for (size_t i = 0; i < mats->size(); ++i) {
    (*mats)[i].mat_type = ProcLib::CUDA;
    (*mats)[i].layout = Layout::HWC;
    (*mats)[i].SetTensor(output_buffers[i]);
  }
#else
  FDASSERT(false, "UltraInfer didn't compile with NVJPEG.");
#endif
  return true;
}
  98. } // namespace vision
  99. } // namespace ultra_infer