// postprocessor.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ultra_infer/vision/detection/contrib/fastestdet/postprocessor.h"

#include <algorithm>
#include <cmath>

#include "ultra_infer/vision/utils/utils.h"
  16. namespace ultra_infer {
  17. namespace vision {
  18. namespace detection {
  19. FastestDetPostprocessor::FastestDetPostprocessor() {
  20. conf_threshold_ = 0.65;
  21. nms_threshold_ = 0.45;
  22. }
  23. float FastestDetPostprocessor::Sigmoid(float x) {
  24. return 1.0f / (1.0f + exp(-x));
  25. }
  26. float FastestDetPostprocessor::Tanh(float x) {
  27. return 2.0f / (1.0f + exp(-2 * x)) - 1;
  28. }
  29. bool FastestDetPostprocessor::Run(
  30. const std::vector<FDTensor> &tensors, std::vector<DetectionResult> *results,
  31. const std::vector<std::map<std::string, std::array<float, 2>>> &ims_info) {
  32. int batch = 1;
  33. results->resize(batch);
  34. for (size_t bs = 0; bs < batch; ++bs) {
  35. (*results)[bs].Clear();
  36. // output (1,85,22,22) CHW
  37. const float *output =
  38. reinterpret_cast<const float *>(tensors[0].Data()) +
  39. bs * tensors[0].shape[1] * tensors[0].shape[2] * tensors[0].shape[3];
  40. int output_h = tensors[0].shape[2]; // out map height
  41. int output_w = tensors[0].shape[3]; // out map weight
  42. auto iter_out = ims_info[bs].find("output_shape");
  43. auto iter_ipt = ims_info[bs].find("input_shape");
  44. FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
  45. "Cannot find input_shape or output_shape from im_info.");
  46. float ipt_h = iter_ipt->second[0];
  47. float ipt_w = iter_ipt->second[1];
  48. // handle output boxes from out map
  49. for (int h = 0; h < output_h; h++) {
  50. for (int w = 0; w < output_w; w++) {
  51. // object score
  52. int obj_score_index = (h * output_w) + w;
  53. float obj_score = output[obj_score_index];
  54. // find max class
  55. int category = 0;
  56. float max_score = 0.0f;
  57. int class_num = tensors[0].shape[1] - 5;
  58. for (size_t i = 0; i < class_num; i++) {
  59. obj_score_index =
  60. ((5 + i) * output_h * output_w) + (h * output_w) + w;
  61. float cls_score = output[obj_score_index];
  62. if (cls_score > max_score) {
  63. max_score = cls_score;
  64. category = i;
  65. }
  66. }
  67. float score = pow(max_score, 0.4) * pow(obj_score, 0.6);
  68. // score threshold
  69. if (score <= conf_threshold_) {
  70. continue;
  71. }
  72. if (score > conf_threshold_) {
  73. // handle box x y w h
  74. int x_offset_index = (1 * output_h * output_w) + (h * output_w) + w;
  75. int y_offset_index = (2 * output_h * output_w) + (h * output_w) + w;
  76. int box_width_index = (3 * output_h * output_w) + (h * output_w) + w;
  77. int box_height_index = (4 * output_h * output_w) + (h * output_w) + w;
  78. float x_offset = Tanh(output[x_offset_index]);
  79. float y_offset = Tanh(output[y_offset_index]);
  80. float box_width = Sigmoid(output[box_width_index]);
  81. float box_height = Sigmoid(output[box_height_index]);
  82. float cx = (w + x_offset) / output_w;
  83. float cy = (h + y_offset) / output_h;
  84. // convert from [x, y, w, h] to [x1, y1, x2, y2]
  85. (*results)[bs].boxes.emplace_back(std::array<float, 4>{
  86. cx - box_width / 2.0f, cy - box_height / 2.0f,
  87. cx + box_width / 2.0f, cy + box_height / 2.0f});
  88. (*results)[bs].label_ids.push_back(category);
  89. (*results)[bs].scores.push_back(score);
  90. }
  91. }
  92. }
  93. if ((*results)[bs].boxes.size() == 0) {
  94. return true;
  95. }
  96. // scale boxes to origin shape
  97. for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
  98. (*results)[bs].boxes[i][0] = ((*results)[bs].boxes[i][0]) * ipt_w;
  99. (*results)[bs].boxes[i][1] = ((*results)[bs].boxes[i][1]) * ipt_h;
  100. (*results)[bs].boxes[i][2] = ((*results)[bs].boxes[i][2]) * ipt_w;
  101. (*results)[bs].boxes[i][3] = ((*results)[bs].boxes[i][3]) * ipt_h;
  102. }
  103. // NMS
  104. utils::NMS(&((*results)[bs]), nms_threshold_);
  105. // clip box
  106. for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
  107. (*results)[bs].boxes[i][0] = std::max((*results)[bs].boxes[i][0], 0.0f);
  108. (*results)[bs].boxes[i][1] = std::max((*results)[bs].boxes[i][1], 0.0f);
  109. (*results)[bs].boxes[i][2] = std::min((*results)[bs].boxes[i][2], ipt_w);
  110. (*results)[bs].boxes[i][3] = std::min((*results)[bs].boxes[i][3], ipt_h);
  111. }
  112. }
  113. return true;
  114. }
  115. } // namespace detection
  116. } // namespace vision
  117. } // namespace ultra_infer