preprocessor.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/vision/detection/contrib/fastestdet/preprocessor.h"

#include "ultra_infer/function/concat.h"

namespace ultra_infer {
namespace vision {
namespace detection {

FastestDetPreprocessor::FastestDetPreprocessor() {
  size_ = {352, 352};  // {h, w}
}

bool FastestDetPreprocessor::Preprocess(
    FDMat *mat, FDTensor *output,
    std::map<std::string, std::array<float, 2>> *im_info) {
  // Record the shape of image and the shape of preprocessed image
  (*im_info)["input_shape"] = {static_cast<float>(mat->Height()),
                               static_cast<float>(mat->Width())};

  // process after image load
  double ratio = (size_[0] * 1.0) / std::max(static_cast<float>(mat->Height()),
                                             static_cast<float>(mat->Width()));

  // fastestdet's preprocess steps
  // 1. resize
  // 2. convert_and_permute(swap_rb=false)
  Resize::Run(mat, size_[0], size_[1]);  // resize
  std::vector<float> alpha = {1.0f / 255.0f, 1.0f / 255.0f, 1.0f / 255.0f};
  std::vector<float> beta = {0.0f, 0.0f, 0.0f};
  // convert to float and HWC2CHW
  ConvertAndPermute::Run(mat, alpha, beta, false);

  // Record output shape of preprocessed image
  (*im_info)["output_shape"] = {static_cast<float>(mat->Height()),
                                static_cast<float>(mat->Width())};

  mat->ShareWithTensor(output);
  output->ExpandDim(0);  // reshape to n, c, h, w
  return true;
}

bool FastestDetPreprocessor::Run(
    std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
    std::vector<std::map<std::string, std::array<float, 2>>> *ims_info) {
  if (images->size() == 0) {
    FDERROR << "The size of input images should be greater than 0."
            << std::endl;
    return false;
  }
  ims_info->resize(images->size());
  outputs->resize(1);
  // Concat all the preprocessed data to a batch tensor
  std::vector<FDTensor> tensors(images->size());
  for (size_t i = 0; i < images->size(); ++i) {
    if (!Preprocess(&(*images)[i], &tensors[i], &(*ims_info)[i])) {
      FDERROR << "Failed to preprocess input image." << std::endl;
      return false;
    }
  }
  if (tensors.size() == 1) {
    (*outputs)[0] = std::move(tensors[0]);
  } else {
    function::Concat(tensors, &((*outputs)[0]), 0);
  }
  return true;
}

} // namespace detection
} // namespace vision
} // namespace ultra_infer
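
For context, a minimal usage sketch follows. It is illustrative only and not part of this file: it assumes FDMat can wrap an OpenCV cv::Mat through its constructor (as in FastDeploy's vision::Mat) and that the image "test.jpg" exists; verify these details against the actual ultra_infer headers.

// Sketch: calling FastestDetPreprocessor::Run on a single image.
// Assumptions (not confirmed by this file): FDMat(cv::Mat&) constructor,
// availability of OpenCV's cv::imread.
#include <array>
#include <map>
#include <vector>

#include <opencv2/opencv.hpp>

#include "ultra_infer/vision/detection/contrib/fastestdet/preprocessor.h"

int main() {
  using namespace ultra_infer::vision;

  cv::Mat image = cv::imread("test.jpg");    // load an image with OpenCV
  std::vector<FDMat> mats = {FDMat(image)};  // wrap it (assumed constructor)
  std::vector<ultra_infer::FDTensor> outputs;
  std::vector<std::map<std::string, std::array<float, 2>>> ims_info;

  detection::FastestDetPreprocessor preprocessor;
  if (!preprocessor.Run(&mats, &outputs, &ims_info)) {
    return 1;  // preprocessing failed
  }
  // outputs[0] should now hold a 1x3x352x352 float tensor ready for inference,
  // and ims_info[0] records the original and preprocessed image shapes.
  return 0;
}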