// postprocessor.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #include "ultra_infer/vision/detection/contrib/yolov5seg/postprocessor.h"
  15. #include "ultra_infer/vision/utils/utils.h"
  16. namespace ultra_infer {
  17. namespace vision {
  18. namespace detection {
  19. YOLOv5SegPostprocessor::YOLOv5SegPostprocessor() {
  20. conf_threshold_ = 0.25;
  21. nms_threshold_ = 0.5;
  22. mask_threshold_ = 0.5;
  23. multi_label_ = true;
  24. max_wh_ = 7680.0;
  25. mask_nums_ = 32;
  26. }
  27. bool YOLOv5SegPostprocessor::Run(
  28. const std::vector<FDTensor> &tensors, std::vector<DetectionResult> *results,
  29. const std::vector<std::map<std::string, std::array<float, 2>>> &ims_info) {
  30. int batch = tensors[0].shape[0];
  31. results->resize(batch);
  32. for (size_t bs = 0; bs < batch; ++bs) {
  33. // store mask information
  34. std::vector<std::vector<float>> mask_embeddings;
  35. (*results)[bs].Clear();
  36. if (multi_label_) {
  37. (*results)[bs].Reserve(tensors[0].shape[1] *
  38. (tensors[0].shape[2] - mask_nums_ - 5));
  39. } else {
  40. (*results)[bs].Reserve(tensors[0].shape[1]);
  41. }
  42. if (tensors[0].dtype != FDDataType::FP32) {
  43. FDERROR << "Only support post process with float32 data." << std::endl;
  44. return false;
  45. }
  46. const float *data = reinterpret_cast<const float *>(tensors[0].Data()) +
  47. bs * tensors[0].shape[1] * tensors[0].shape[2];
  48. for (size_t i = 0; i < tensors[0].shape[1]; ++i) {
  49. int s = i * tensors[0].shape[2];
  50. float cls_conf = data[s + 4];
  51. float confidence = data[s + 4];
  52. std::vector<float> mask_embedding(data + s + tensors[0].shape[2] -
  53. mask_nums_,
  54. data + s + tensors[0].shape[2]);
  55. for (size_t k = 0; k < mask_embedding.size(); ++k) {
  56. mask_embedding[k] *= cls_conf;
  57. }
  58. if (multi_label_) {
  59. for (size_t j = 5; j < tensors[0].shape[2] - mask_nums_; ++j) {
  60. confidence = data[s + 4];
  61. const float *class_score = data + s + j;
  62. confidence *= (*class_score);
  63. // filter boxes by conf_threshold
  64. if (confidence <= conf_threshold_) {
  65. continue;
  66. }
  67. int32_t label_id = std::distance(data + s + 5, class_score);
  68. // convert from [x, y, w, h] to [x1, y1, x2, y2]
  69. (*results)[bs].boxes.emplace_back(std::array<float, 4>{
  70. data[s] - data[s + 2] / 2.0f + label_id * max_wh_,
  71. data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh_,
  72. data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh_,
  73. data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh_});
  74. (*results)[bs].label_ids.push_back(label_id);
  75. (*results)[bs].scores.push_back(confidence);
  76. // TODO(wangjunjie06): No zero copy
  77. mask_embeddings.push_back(mask_embedding);
  78. }
  79. } else {
  80. const float *max_class_score = std::max_element(
  81. data + s + 5, data + s + tensors[0].shape[2] - mask_nums_);
  82. confidence *= (*max_class_score);
  83. // filter boxes by conf_threshold
  84. if (confidence <= conf_threshold_) {
  85. continue;
  86. }
  87. int32_t label_id = std::distance(data + s + 5, max_class_score);
  88. // convert from [x, y, w, h] to [x1, y1, x2, y2]
  89. (*results)[bs].boxes.emplace_back(std::array<float, 4>{
  90. data[s] - data[s + 2] / 2.0f + label_id * max_wh_,
  91. data[s + 1] - data[s + 3] / 2.0f + label_id * max_wh_,
  92. data[s + 0] + data[s + 2] / 2.0f + label_id * max_wh_,
  93. data[s + 1] + data[s + 3] / 2.0f + label_id * max_wh_});
  94. (*results)[bs].label_ids.push_back(label_id);
  95. (*results)[bs].scores.push_back(confidence);
  96. mask_embeddings.push_back(mask_embedding);
  97. }
  98. }
  99. if ((*results)[bs].boxes.size() == 0) {
  100. return true;
  101. }
  102. // get box index after nms
  103. std::vector<int> index;
  104. utils::NMS(&((*results)[bs]), nms_threshold_, &index);
  105. // deal with mask
  106. // step1: MatMul, (box_nums * 32) x (32 * 160 * 160) = box_nums * 160 * 160
  107. // step2: Sigmoid
  108. // step3: Resize to original image size
  109. // step4: Select pixels greater than threshold and crop
  110. (*results)[bs].contain_masks = true;
  111. (*results)[bs].masks.resize((*results)[bs].boxes.size());
  112. const float *data_mask =
  113. reinterpret_cast<const float *>(tensors[1].Data()) +
  114. bs * tensors[1].shape[1] * tensors[1].shape[2] * tensors[1].shape[3];
  115. cv::Mat mask_proto =
  116. cv::Mat(tensors[1].shape[1], tensors[1].shape[2] * tensors[1].shape[3],
  117. CV_32FC(1), const_cast<float *>(data_mask));
  118. // vector to cv::Mat for MatMul
  119. // after push_back, Mat of m*n becomes (m + 1) * n
  120. cv::Mat mask_proposals;
  121. for (size_t i = 0; i < index.size(); ++i) {
  122. mask_proposals.push_back(cv::Mat(mask_embeddings[index[i]]).t());
  123. }
  124. cv::Mat matmul_result = (mask_proposals * mask_proto).t();
  125. cv::Mat masks = matmul_result.reshape(
  126. (*results)[bs].boxes.size(), {static_cast<int>(tensors[1].shape[2]),
  127. static_cast<int>(tensors[1].shape[3])});
  128. // split for boxes nums
  129. std::vector<cv::Mat> mask_channels;
  130. cv::split(masks, mask_channels);
  131. // scale the boxes to the origin image shape
  132. auto iter_out = ims_info[bs].find("output_shape");
  133. auto iter_ipt = ims_info[bs].find("input_shape");
  134. FDASSERT(iter_out != ims_info[bs].end() && iter_ipt != ims_info[bs].end(),
  135. "Cannot find input_shape or output_shape from im_info.");
  136. float out_h = iter_out->second[0];
  137. float out_w = iter_out->second[1];
  138. float ipt_h = iter_ipt->second[0];
  139. float ipt_w = iter_ipt->second[1];
  140. float scale = std::min(out_h / ipt_h, out_w / ipt_w);
  141. float pad_h = (out_h - ipt_h * scale) / 2;
  142. float pad_w = (out_w - ipt_w * scale) / 2;
  143. // for mask
  144. float pad_h_mask = (float)pad_h / out_h * tensors[1].shape[2];
  145. float pad_w_mask = (float)pad_w / out_w * tensors[1].shape[3];
  146. for (size_t i = 0; i < (*results)[bs].boxes.size(); ++i) {
  147. int32_t label_id = ((*results)[bs].label_ids)[i];
  148. // clip box
  149. (*results)[bs].boxes[i][0] =
  150. (*results)[bs].boxes[i][0] - max_wh_ * label_id;
  151. (*results)[bs].boxes[i][1] =
  152. (*results)[bs].boxes[i][1] - max_wh_ * label_id;
  153. (*results)[bs].boxes[i][2] =
  154. (*results)[bs].boxes[i][2] - max_wh_ * label_id;
  155. (*results)[bs].boxes[i][3] =
  156. (*results)[bs].boxes[i][3] - max_wh_ * label_id;
  157. (*results)[bs].boxes[i][0] =
  158. std::max(((*results)[bs].boxes[i][0] - pad_w) / scale, 0.0f);
  159. (*results)[bs].boxes[i][1] =
  160. std::max(((*results)[bs].boxes[i][1] - pad_h) / scale, 0.0f);
  161. (*results)[bs].boxes[i][2] =
  162. std::max(((*results)[bs].boxes[i][2] - pad_w) / scale, 0.0f);
  163. (*results)[bs].boxes[i][3] =
  164. std::max(((*results)[bs].boxes[i][3] - pad_h) / scale, 0.0f);
  165. (*results)[bs].boxes[i][0] = std::min((*results)[bs].boxes[i][0], ipt_w);
  166. (*results)[bs].boxes[i][1] = std::min((*results)[bs].boxes[i][1], ipt_h);
  167. (*results)[bs].boxes[i][2] = std::min((*results)[bs].boxes[i][2], ipt_w);
  168. (*results)[bs].boxes[i][3] = std::min((*results)[bs].boxes[i][3], ipt_h);
  169. // deal with mask
  170. cv::Mat dest, mask;
  171. // sigmoid
  172. cv::exp(-mask_channels[i], dest);
  173. dest = 1.0 / (1.0 + dest);
  174. // crop mask for feature map
  175. int x1 = static_cast<int>(pad_w_mask);
  176. int y1 = static_cast<int>(pad_h_mask);
  177. int x2 = static_cast<int>(tensors[1].shape[3] - pad_w_mask);
  178. int y2 = static_cast<int>(tensors[1].shape[2] - pad_h_mask);
  179. cv::Rect roi(x1, y1, x2 - x1, y2 - y1);
  180. dest = dest(roi);
  181. cv::resize(dest, mask, cv::Size(ipt_w, ipt_h), 0, 0, cv::INTER_LINEAR);
  182. // crop mask for source img
  183. int x1_src = static_cast<int>(round((*results)[bs].boxes[i][0]));
  184. int y1_src = static_cast<int>(round((*results)[bs].boxes[i][1]));
  185. int x2_src = static_cast<int>(round((*results)[bs].boxes[i][2]));
  186. int y2_src = static_cast<int>(round((*results)[bs].boxes[i][3]));
  187. cv::Rect roi_src(x1_src, y1_src, x2_src - x1_src, y2_src - y1_src);
  188. mask = mask(roi_src);
  189. mask = mask > mask_threshold_;
  190. // save mask in DetectionResult
  191. int keep_mask_h = y2_src - y1_src;
  192. int keep_mask_w = x2_src - x1_src;
  193. int keep_mask_numel = keep_mask_h * keep_mask_w;
  194. (*results)[bs].masks[i].Resize(keep_mask_numel);
  195. (*results)[bs].masks[i].shape = {keep_mask_h, keep_mask_w};
  196. uint8_t *keep_mask_ptr =
  197. reinterpret_cast<uint8_t *>((*results)[bs].masks[i].Data());
  198. std::memcpy(keep_mask_ptr, reinterpret_cast<uint8_t *>(mask.ptr()),
  199. keep_mask_numel * sizeof(uint8_t));
  200. }
  201. }
  202. return true;
  203. }
  204. } // namespace detection
  205. } // namespace vision
  206. } // namespace ultra_infer