postprocessor.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/vision/faceid/contrib/adaface/postprocessor.h"

#include <cstring>

#include "ultra_infer/vision/utils/utils.h"

namespace ultra_infer {
namespace vision {
namespace faceid {

AdaFacePostprocessor::AdaFacePostprocessor() { l2_normalize_ = false; }

bool AdaFacePostprocessor::Run(std::vector<FDTensor> &infer_result,
                               std::vector<FaceRecognitionResult> *results) {
  if (infer_result[0].dtype != FDDataType::FP32) {
    FDERROR << "Only support post process with float32 data." << std::endl;
    return false;
  }
  if (infer_result.size() != 1) {
    FDERROR << "The default number of output tensor "
               "must be 1 according to insightface."
            << std::endl;
  }
  int batch = infer_result[0].shape[0];
  results->resize(batch);
  for (size_t bs = 0; bs < batch; ++bs) {
    FDTensor &embedding_tensor = infer_result.at(bs);
    FDASSERT((embedding_tensor.shape[0] == 1), "Only support batch = 1 now.");
    if (embedding_tensor.dtype != FDDataType::FP32) {
      FDERROR << "Only support post process with float32 data." << std::endl;
      return false;
    }
    (*results)[bs].Clear();
    (*results)[bs].Resize(embedding_tensor.Numel());
    // Copy the raw embedding vector directly without L2 normalization,
    // letting the user decide whether to normalize or not.
    // utils::L2Normalize() is called to perform L2 normalization
    // only if l2_normalize_ was set to true.
    std::memcpy((*results)[bs].embedding.data(), embedding_tensor.Data(),
                embedding_tensor.Nbytes());
    if (l2_normalize_) {
      auto norm_embedding = utils::L2Normalize((*results)[bs].embedding);
      std::memcpy((*results)[bs].embedding.data(), norm_embedding.data(),
                  embedding_tensor.Nbytes());
    }
  }
  return true;
}
} // namespace faceid
} // namespace vision
} // namespace ultra_infer
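
For reference, below is a minimal, hypothetical usage sketch of this postprocessor, not part of postprocessor.cc. It assumes the FDTensor::Resize(shape, dtype) overload and an AdaFacePostprocessor::SetL2Normalize() setter exist as in the FastDeploy code this module derives from; the tensor shape [1, 512] and the constant fill values are purely illustrative.

// Hypothetical usage sketch (assumptions noted above).
#include <iostream>
#include <vector>

#include "ultra_infer/vision/faceid/contrib/adaface/postprocessor.h"

int main() {
  // Fake a single FP32 embedding tensor of shape [1, 512], as if it were
  // the AdaFace model's raw output.
  ultra_infer::FDTensor embedding;
  embedding.Resize({1, 512}, ultra_infer::FDDataType::FP32);  // assumed overload
  float *data = static_cast<float *>(embedding.Data());
  for (int i = 0; i < 512; ++i) {
    data[i] = 0.5f;  // placeholder values
  }

  std::vector<ultra_infer::FDTensor> infer_result;
  infer_result.emplace_back(std::move(embedding));

  ultra_infer::vision::faceid::AdaFacePostprocessor postprocessor;
  bool l2_normalize = true;                    // ask for L2-normalized output
  postprocessor.SetL2Normalize(l2_normalize);  // assumed setter

  std::vector<ultra_infer::vision::FaceRecognitionResult> results;
  if (!postprocessor.Run(infer_result, &results)) {
    std::cerr << "Postprocess failed." << std::endl;
    return -1;
  }
  std::cout << "embedding dim: " << results[0].embedding.size() << std::endl;
  return 0;
}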