// postprocessor.cc
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
#include "ultra_infer/vision/faceid/contrib/insightface/postprocessor.h"

#include <cstring>  // std::memcpy

#include "ultra_infer/vision/utils/utils.h"
  16. namespace ultra_infer {
  17. namespace vision {
  18. namespace faceid {
  19. InsightFaceRecognitionPostprocessor::InsightFaceRecognitionPostprocessor() {
  20. l2_normalize_ = false;
  21. }
  22. bool InsightFaceRecognitionPostprocessor::Run(
  23. std::vector<FDTensor> &infer_result,
  24. std::vector<FaceRecognitionResult> *results) {
  25. if (infer_result[0].dtype != FDDataType::FP32) {
  26. FDERROR << "Only support post process with float32 data." << std::endl;
  27. return false;
  28. }
  29. if (infer_result.size() != 1) {
  30. FDERROR << "The default number of output tensor "
  31. "must be 1 according to insightface."
  32. << std::endl;
  33. }
  34. int batch = infer_result[0].shape[0];
  35. results->resize(batch);
  36. for (size_t bs = 0; bs < batch; ++bs) {
  37. FDTensor &embedding_tensor = infer_result.at(bs);
  38. FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch = 1 now.");
  39. if (embedding_tensor.dtype != FDDataType::FP32) {
  40. FDERROR << "Only support post process with float32 data." << std::endl;
  41. return false;
  42. }
  43. (*results)[bs].Clear();
  44. (*results)[bs].Resize(embedding_tensor.Numel());
  45. // Copy the raw embedding vector directly without L2 normalize
  46. // post process. Let the user decide whether to normalize or not.
  47. // Will call utils::L2Normlize() method to perform L2
  48. // normalize if l2_normalize was set as 'true'.
  49. std::memcpy((*results)[bs].embedding.data(), embedding_tensor.Data(),
  50. embedding_tensor.Nbytes());
  51. if (l2_normalize_) {
  52. auto norm_embedding = utils::L2Normalize((*results)[bs].embedding);
  53. std::memcpy((*results)[bs].embedding.data(), norm_embedding.data(),
  54. embedding_tensor.Nbytes());
  55. }
  56. }
  57. return true;
  58. }
  59. } // namespace faceid
  60. } // namespace vision
  61. } // namespace ultra_infer