# yolov7.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from .... import UltraInferModel, ModelFormat
from .... import c_lib_wrap as C
  17. class YOLOv7Preprocessor:
  18. def __init__(self):
  19. """Create a preprocessor for YOLOv7"""
  20. self._preprocessor = C.vision.detection.YOLOv7Preprocessor()
  21. def run(self, input_ims):
  22. """Preprocess input images for YOLOv7
  23. :param: input_ims: (list of numpy.ndarray)The input image
  24. :return: list of FDTensor
  25. """
  26. return self._preprocessor.run(input_ims)
  27. @property
  28. def size(self):
  29. """
  30. Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
  31. """
  32. return self._preprocessor.size
  33. @property
  34. def padding_value(self):
  35. """
  36. padding value for preprocessing, default [114.0, 114.0, 114.0]
  37. """
  38. # padding value, size should be the same as channels
  39. return self._preprocessor.padding_value
  40. @property
  41. def is_scale_up(self):
  42. """
  43. is_scale_up for preprocessing, the input image only can be zoom out, the maximum resize scale cannot exceed 1.0, default true
  44. """
  45. return self._preprocessor.is_scale_up
  46. @size.setter
  47. def size(self, wh):
  48. assert isinstance(
  49. wh, (list, tuple)
  50. ), "The value to set `size` must be type of tuple or list."
  51. assert (
  52. len(wh) == 2
  53. ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
  54. len(wh)
  55. )
  56. self._preprocessor.size = wh
  57. @padding_value.setter
  58. def padding_value(self, value):
  59. assert isinstance(
  60. value, list
  61. ), "The value to set `padding_value` must be type of list."
  62. self._preprocessor.padding_value = value
  63. @is_scale_up.setter
  64. def is_scale_up(self, value):
  65. assert isinstance(
  66. value, bool
  67. ), "The value to set `is_scale_up` must be type of bool."
  68. self._preprocessor.is_scale_up = value
  69. class YOLOv7Postprocessor:
  70. def __init__(self):
  71. """Create a postprocessor for YOLOv7"""
  72. self._postprocessor = C.vision.detection.YOLOv7Postprocessor()
  73. def run(self, runtime_results, ims_info):
  74. """Postprocess the runtime results for YOLOv7
  75. :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
  76. :param: ims_info: (list of dict)Record input_shape and output_shape
  77. :return: list of DetectionResult(If the runtime_results is predict by batched samples, the length of this list equals to the batch size)
  78. """
  79. return self._postprocessor.run(runtime_results, ims_info)
  80. @property
  81. def conf_threshold(self):
  82. """
  83. confidence threshold for postprocessing, default is 0.25
  84. """
  85. return self._postprocessor.conf_threshold
  86. @property
  87. def nms_threshold(self):
  88. """
  89. nms threshold for postprocessing, default is 0.5
  90. """
  91. return self._postprocessor.nms_threshold
  92. @conf_threshold.setter
  93. def conf_threshold(self, conf_threshold):
  94. assert isinstance(
  95. conf_threshold, float
  96. ), "The value to set `conf_threshold` must be type of float."
  97. self._postprocessor.conf_threshold = conf_threshold
  98. @nms_threshold.setter
  99. def nms_threshold(self, nms_threshold):
  100. assert isinstance(
  101. nms_threshold, float
  102. ), "The value to set `nms_threshold` must be type of float."
  103. self._postprocessor.nms_threshold = nms_threshold
  104. class YOLOv7(UltraInferModel):
  105. def __init__(
  106. self,
  107. model_file,
  108. params_file="",
  109. runtime_option=None,
  110. model_format=ModelFormat.ONNX,
  111. ):
  112. """Load a YOLOv7 model exported by YOLOv7.
  113. :param model_file: (str)Path of model file, e.g ./yolov7.onnx
  114. :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
  115. :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
  116. :param model_format: (ultra_infer.ModelForamt)Model format of the loaded model
  117. """
  118. # 调用基函数进行backend_option的初始化
  119. # 初始化后的option保存在self._runtime_option
  120. super(YOLOv7, self).__init__(runtime_option)
  121. self._model = C.vision.detection.YOLOv7(
  122. model_file, params_file, self._runtime_option, model_format
  123. )
  124. # 通过self.initialized判断整个模型的初始化是否成功
  125. assert self.initialized, "YOLOv7 initialize failed."
  126. def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
  127. """Detect an input image
  128. :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
  129. :param conf_threshold: confidence threshold for postprocessing, default is 0.25
  130. :param nms_iou_threshold: iou threshold for NMS, default is 0.5
  131. :return: DetectionResult
  132. """
  133. self.postprocessor.conf_threshold = conf_threshold
  134. self.postprocessor.nms_threshold = nms_iou_threshold
  135. return self._model.predict(input_image)
  136. def batch_predict(self, images):
  137. """Classify a batch of input image
  138. :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
  139. :return list of DetectionResult
  140. """
  141. return self._model.batch_predict(images)
  142. @property
  143. def preprocessor(self):
  144. """Get YOLOv7Preprocessor object of the loaded model
  145. :return YOLOv7Preprocessor
  146. """
  147. return self._model.preprocessor
  148. @property
  149. def postprocessor(self):
  150. """Get YOLOv7Postprocessor object of the loaded model
  151. :return YOLOv7Postprocessor
  152. """
  153. return self._model.postprocessor