// postprocessor.cc
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #include "ultra_infer/vision/facedet/contrib/centerface/postprocessor.h"
  15. #include "ultra_infer/vision/utils/utils.h"
  16. namespace ultra_infer {
  17. namespace vision {
  18. namespace facedet {
  19. CenterFacePostprocessor::CenterFacePostprocessor() {
  20. conf_threshold_ = 0.5;
  21. nms_threshold_ = 0.3;
  22. landmarks_per_face_ = 5;
  23. }
  24. bool CenterFacePostprocessor::Run(
  25. const std::vector<FDTensor> &infer_result,
  26. std::vector<FaceDetectionResult> *results,
  27. const std::vector<std::map<std::string, std::array<float, 2>>> &ims_info) {
  28. int batch = infer_result[0].shape[0];
  29. results->resize(batch);
  30. FDTensor heatmap = infer_result[0]; //(1 1 160 160)
  31. FDTensor scales = infer_result[1]; //(1 2 160 160)
  32. FDTensor offsets = infer_result[2]; //(1 2 160 160)
  33. FDTensor landmarks = infer_result[3]; //(1 10 160 160)
  34. for (size_t bs = 0; bs < batch; ++bs) {
  35. (*results)[bs].Clear();
  36. (*results)[bs].landmarks_per_face = landmarks_per_face_;
  37. (*results)[bs].Reserve(heatmap.shape[2]);
  38. if (infer_result[0].dtype != FDDataType::FP32) {
  39. FDERROR << "Only support post process with float32 data." << std::endl;
  40. return false;
  41. }
  42. int fea_h = heatmap.shape[2];
  43. int fea_w = heatmap.shape[3];
  44. int spacial_size = fea_w * fea_h;
  45. float *heatmap_out = static_cast<float *>(heatmap.Data());
  46. float *scale0 = static_cast<float *>(scales.Data());
  47. float *scale1 = scale0 + spacial_size;
  48. float *offset0 = static_cast<float *>(offsets.Data());
  49. float *offset1 = offset0 + spacial_size;
  50. float confidence = 0.f;
  51. std::vector<int> ids;
  52. for (int i = 0; i < fea_h; i++) {
  53. for (int j = 0; j < fea_w; j++) {
  54. if (heatmap_out[i * fea_w + j] > conf_threshold_) {
  55. ids.push_back(i);
  56. ids.push_back(j);
  57. }
  58. }
  59. }
  60. auto iter_out = ims_info[bs].find("output_shape");
  61. auto iter_ipt = ims_info[bs].find("input_shape");
  62. FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
  63. "Cannot find input_shape or output_shape from im_info.");
  64. float out_h = iter_out->second[0];
  65. float out_w = iter_out->second[1];
  66. float ipt_h = iter_ipt->second[0];
  67. float ipt_w = iter_ipt->second[1];
  68. float scale_h = ipt_h / out_h;
  69. float scale_w = ipt_w / out_w;
  70. for (int i = 0; i < ids.size() / 2; i++) {
  71. int id_h = ids[2 * i];
  72. int id_w = ids[2 * i + 1];
  73. int index = id_h * fea_w + id_w;
  74. confidence = heatmap_out[index];
  75. float s0 = std::exp(scale0[index]) * 4;
  76. float s1 = std::exp(scale1[index]) * 4;
  77. float o0 = offset0[index];
  78. float o1 = offset1[index];
  79. float x1 = (id_w + o1 + 0.5) * 4 - s1 / 2 > 0.f
  80. ? (id_w + o1 + 0.5) * 4 - s1 / 2
  81. : 0;
  82. float y1 = (id_h + o0 + 0.5) * 4 - s0 / 2 > 0
  83. ? (id_h + o0 + 0.5) * 4 - s0 / 2
  84. : 0;
  85. float x2 = 0, y2 = 0;
  86. x1 = x1 < (float)out_w ? x1 : (float)out_w;
  87. y1 = y1 < (float)out_h ? y1 : (float)out_h;
  88. x2 = x1 + s1 < (float)out_w ? x1 + s1 : (float)out_w;
  89. y2 = y1 + s0 < (float)out_h ? y1 + s0 : (float)out_h;
  90. (*results)[bs].boxes.emplace_back(std::array<float, 4>{x1, y1, x2, y2});
  91. (*results)[bs].scores.push_back(confidence);
  92. // decode landmarks (default 5 landmarks)
  93. if (landmarks_per_face_ > 0) {
  94. // reference: utils/box_utils.py#L241
  95. for (size_t j = 0; j < landmarks_per_face_; j++) {
  96. float *xmap = (float *)landmarks.Data() + (2 * j + 1) * spacial_size;
  97. float *ymap = (float *)landmarks.Data() + (2 * j) * spacial_size;
  98. float lx = (x1 + xmap[index] * s1) * scale_w;
  99. float ly = (y1 + ymap[index] * s0) * scale_h;
  100. (*results)[bs].landmarks.emplace_back(std::array<float, 2>{lx, ly});
  101. }
  102. }
  103. }
  104. if ((*results)[bs].boxes.size() == 0) {
  105. return true;
  106. }
  107. utils::NMS(&((*results)[bs]), nms_threshold_);
  108. for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
  109. (*results)[bs].boxes[i][0] =
  110. std::max((*results)[bs].boxes[i][0] * scale_w, 0.0f);
  111. (*results)[bs].boxes[i][1] =
  112. std::max((*results)[bs].boxes[i][1] * scale_h, 0.0f);
  113. (*results)[bs].boxes[i][2] =
  114. std::max((*results)[bs].boxes[i][2] * scale_w, 0.0f);
  115. (*results)[bs].boxes[i][3] =
  116. std::max((*results)[bs].boxes[i][3] * scale_h, 0.0f);
  117. (*results)[bs].boxes[i][0] =
  118. std::min((*results)[bs].boxes[i][0], ipt_w - 1.0f);
  119. (*results)[bs].boxes[i][1] =
  120. std::min((*results)[bs].boxes[i][1], ipt_h - 1.0f);
  121. (*results)[bs].boxes[i][2] =
  122. std::min((*results)[bs].boxes[i][2], ipt_w - 1.0f);
  123. (*results)[bs].boxes[i][3] =
  124. std::min((*results)[bs].boxes[i][3], ipt_h - 1.0f);
  125. }
  126. }
  127. return true;
  128. }
  129. } // namespace facedet
  130. } // namespace vision
  131. } // namespace ultra_infer