// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/vision/detection/contrib/yolov8/postprocessor.h"

#include "ultra_infer/vision/utils/utils.h"

namespace ultra_infer {
namespace vision {
namespace detection {
  19. YOLOv8Postprocessor::YOLOv8Postprocessor() {
  20. conf_threshold_ = 0.25;
  21. nms_threshold_ = 0.5;
  22. multi_label_ = true;
  23. max_wh_ = 7680.0;
  24. }
  25. bool YOLOv8Postprocessor::Run(
  26. const std::vector<FDTensor> &tensors, std::vector<DetectionResult> *results,
  27. const std::vector<std::map<std::string, std::array<float, 2>>> &ims_info) {
  28. int batch = tensors[0].shape[0];
  29. // transpose
  30. std::vector<int64_t> dim{0, 2, 1};
  31. FDTensor tensor_transpose;
  32. function::Transpose(tensors[0], &tensor_transpose, dim);
  33. results->resize(batch);
  34. for (size_t bs = 0; bs < batch; ++bs) {
  35. (*results)[bs].Clear();
  36. if (multi_label_) {
  37. (*results)[bs].Reserve(tensor_transpose.shape[1] *
  38. (tensor_transpose.shape[2] - 4));
  39. } else {
  40. (*results)[bs].Reserve(tensor_transpose.shape[1]);
  41. }
  42. if (tensor_transpose.dtype != FDDataType::FP32) {
  43. FDERROR << "Only support post process with float32 data." << std::endl;
  44. return false;
  45. }
  46. const float *data =
  47. reinterpret_cast<const float *>(tensor_transpose.Data()) +
  48. bs * tensor_transpose.shape[1] * tensor_transpose.shape[2];
  49. for (size_t i = 0; i < tensor_transpose.shape[1]; ++i) {
  50. int s = i * tensor_transpose.shape[2];
  51. if (multi_label_) {
  52. for (size_t j = 4; j < tensor_transpose.shape[2]; ++j) {
  53. float confidence = data[s + j];
  54. // filter boxes by conf_threshold
  55. if (confidence <= conf_threshold_) {
  56. continue;
  57. }
  58. int32_t label_id = j - 4;
  59. // convert from [x, y, w, h] to [x1, y1, x2, y2]
  60. (*results)[bs].boxes.emplace_back(std::array<float, 4>{
  61. data[s] - data[s + 2] / 2.0f + label_id * max_wh_,
  62. data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh_,
  63. data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh_,
  64. data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh_});
  65. (*results)[bs].label_ids.push_back(label_id);
  66. (*results)[bs].scores.push_back(confidence);
  67. }
  68. } else {
  69. const float *max_class_score = std::max_element(
  70. data + s + 4, data + s + tensor_transpose.shape[2]);
  71. float confidence = *max_class_score;
  72. // filter boxes by conf_threshold
  73. if (confidence <= conf_threshold_) {
  74. continue;
  75. }
  76. int32_t label_id = std::distance(data + s + 4, max_class_score);
  77. // convert from [x, y, w, h] to [x1, y1, x2, y2]
  78. (*results)[bs].boxes.emplace_back(std::array<float, 4>{
  79. data[s] - data[s + 2] / 2.0f + label_id * max_wh_,
  80. data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh_,
  81. data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh_,
  82. data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh_});
  83. (*results)[bs].label_ids.push_back(label_id);
  84. (*results)[bs].scores.push_back(confidence);
  85. }
  86. }
  87. if ((*results)[bs].boxes.size() == 0) {
  88. return true;
  89. }
  90. utils::NMS(&((*results)[bs]), nms_threshold_);
  91. // scale the boxes to the origin image shape
  92. auto iter_out = ims_info[bs].find("output_shape");
  93. auto iter_ipt = ims_info[bs].find("input_shape");
  94. FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
  95. "Cannot find input_shape or output_shape from im_info.");
  96. float out_h = iter_out->second[0];
  97. float out_w = iter_out->second[1];
  98. float ipt_h = iter_ipt->second[0];
  99. float ipt_w = iter_ipt->second[1];
  100. float scale = std::min(out_h / ipt_h, out_w / ipt_w);
  101. float pad_h = (out_h - ipt_h * scale) / 2;
  102. float pad_w = (out_w - ipt_w * scale) / 2;
  103. for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
  104. int32_t label_id = ((*results)[bs].label_ids)[i];
  105. // clip box
  106. (*results)[bs].boxes[i][0] =
  107. (*results)[bs].boxes[i][0] - max_wh_ * label_id;
  108. (*results)[bs].boxes[i][1] =
  109. (*results)[bs].boxes[i][1] - max_wh_ * label_id;
  110. (*results)[bs].boxes[i][2] =
  111. (*results)[bs].boxes[i][2] - max_wh_ * label_id;
  112. (*results)[bs].boxes[i][3] =
  113. (*results)[bs].boxes[i][3] - max_wh_ * label_id;
  114. (*results)[bs].boxes[i][0] =
  115. std::max(((*results)[bs].boxes[i][0] - pad_w) / scale, 0.0f);
  116. (*results)[bs].boxes[i][1] =
  117. std::max(((*results)[bs].boxes[i][1] - pad_h) / scale, 0.0f);
  118. (*results)[bs].boxes[i][2] =
  119. std::max(((*results)[bs].boxes[i][2] - pad_w) / scale, 0.0f);
  120. (*results)[bs].boxes[i][3] =
  121. std::max(((*results)[bs].boxes[i][3] - pad_h) / scale, 0.0f);
  122. (*results)[bs].boxes[i][0] = std::min((*results)[bs].boxes[i][0], ipt_w);
  123. (*results)[bs].boxes[i][1] = std::min((*results)[bs].boxes[i][1], ipt_h);
  124. (*results)[bs].boxes[i][2] = std::min((*results)[bs].boxes[i][2], ipt_w);
  125. (*results)[bs].boxes[i][3] = std::min((*results)[bs].boxes[i][3], ipt_h);
  126. }
  127. }
  128. return true;
  129. }
} // namespace detection
} // namespace vision
} // namespace ultra_infer