# yolov7face.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import

from .... import UltraInferModel, ModelFormat
from .... import c_lib_wrap as C
  17. class Yolov7FacePreprocessor:
  18. def __init__(self):
  19. """Create a preprocessor for Yolov7Face"""
  20. self._preprocessor = C.vision.facedet.Yolov7Preprocessor()
  21. def run(self, input_ims):
  22. """Preprocess input images for Yolov7Face
  23. :param: input_ims: (list of numpy.ndarray)The input image
  24. :return: list of FDTensor
  25. """
  26. return self._preprocessor.run(input_ims)
  27. @property
  28. def size(self):
  29. """
  30. Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
  31. """
  32. return self._preprocessor.size
  33. @property
  34. def padding_color_value(self):
  35. """
  36. padding value for preprocessing, default [114.0, 114.0, 114.0]
  37. """
  38. # padding value, size should be the same as channels
  39. return self._preprocessor.padding_color_value
  40. @property
  41. def is_scale_up(self):
  42. """
  43. is_scale_up for preprocessing, the input image only can be zoom out, the maximum resize scale cannot exceed 1.0, default true
  44. """
  45. return self._preprocessor.is_scale_up
  46. @size.setter
  47. def size(self, wh):
  48. assert isinstance(
  49. wh, (list, tuple)
  50. ), "The value to set `size` must be type of tuple or list."
  51. assert (
  52. len(wh) == 2
  53. ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
  54. len(wh)
  55. )
  56. self._preprocessor.size = wh
  57. @padding_color_value.setter
  58. def padding_color_value(self, value):
  59. assert isinstance(
  60. value, list
  61. ), "The value to set `padding_color_value` must be type of list."
  62. self._preprocessor.padding_color_value = value
  63. @is_scale_up.setter
  64. def is_scale_up(self, value):
  65. assert isinstance(
  66. value, bool
  67. ), "The value to set `is_scale_up` must be type of bool."
  68. self._preprocessor.is_scale_up = value
  69. class Yolov7FacePostprocessor:
  70. def __init__(self):
  71. """Create a postprocessor for Yolov7Face"""
  72. self._postprocessor = C.vision.facedet.Yolov7FacePostprocessor()
  73. def run(self, runtime_results, ims_info):
  74. """Postprocess the runtime results for Yolov7Face
  75. :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
  76. :param: ims_info: (list of dict)Record input_shape and output_shape
  77. :return: list of DetectionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
  78. """
  79. return self._postprocessor.run(runtime_results, ims_info)
  80. @property
  81. def conf_threshold(self):
  82. """
  83. confidence threshold for postprocessing, default is 0.5
  84. """
  85. return self._postprocessor.conf_threshold
  86. @property
  87. def nms_threshold(self):
  88. """
  89. nms threshold for postprocessing, default is 0.45
  90. """
  91. return self._postprocessor.nms_threshold
  92. @property
  93. def landmarks_per_face(self):
  94. """
  95. landmarks per face for postprocessing, default is 5
  96. """
  97. return self._postprocessor.landmarks_per_face
  98. @conf_threshold.setter
  99. def conf_threshold(self, conf_threshold):
  100. assert isinstance(
  101. conf_threshold, float
  102. ), "The value to set `conf_threshold` must be type of float."
  103. self._postprocessor.conf_threshold = conf_threshold
  104. @nms_threshold.setter
  105. def nms_threshold(self, nms_threshold):
  106. assert isinstance(
  107. nms_threshold, float
  108. ), "The value to set `nms_threshold` must be type of float."
  109. self._postprocessor.nms_threshold = nms_threshold
  110. @landmarks_per_face.setter
  111. def landmarks_per_face(self, landmarks_per_face):
  112. assert isinstance(
  113. landmarks_per_face, int
  114. ), "The value to set `landmarks_per_face` must be type of int."
  115. self._postprocessor.landmarks_per_face = landmarks_per_face
  116. class YOLOv7Face(UltraInferModel):
  117. def __init__(
  118. self,
  119. model_file,
  120. params_file="",
  121. runtime_option=None,
  122. model_format=ModelFormat.ONNX,
  123. ):
  124. """Load a YOLOv7Face model exported by YOLOv7Face.
  125. :param model_file: (str)Path of model file, e.g ./yolov7face.onnx
  126. :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
  127. :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
  128. :param model_format: (ultra_infer.ModelForamt)Model format of the loaded model
  129. """
  130. super(YOLOv7Face, self).__init__(runtime_option)
  131. self._model = C.vision.facedet.YOLOv7Face(
  132. model_file, params_file, self._runtime_option, model_format
  133. )
  134. assert self.initialized, "YOLOv7Face initialize failed."
  135. def predict(self, input_image):
  136. """Detect the location and key points of human faces from an input image
  137. :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
  138. :return: FaceDetectionResult
  139. """
  140. return self._model.predict(input_image)
  141. def batch_predict(self, images):
  142. """Classify a batch of input image
  143. :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
  144. :return list of DetectionResult
  145. """
  146. return self._model.batch_predict(images)
  147. @property
  148. def preprocessor(self):
  149. """Get YOLOv7Preprocessor object of the loaded model
  150. :return YOLOv7Preprocessor
  151. """
  152. return self._model.preprocessor
  153. @property
  154. def postprocessor(self):
  155. """Get YOLOv7Postprocessor object of the loaded model
  156. :return YOLOv7Postprocessor
  157. """
  158. return self._model.postprocessor