
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

from .... import UltraInferModel, ModelFormat
from .... import c_lib_wrap as C

class YOLOv8Preprocessor:
    def __init__(self):
        """Create a preprocessor for YOLOv8."""
        self._preprocessor = C.vision.detection.YOLOv8Preprocessor()

    def run(self, input_ims):
        """Preprocess input images for YOLOv8.

        :param input_ims: (list of numpy.ndarray) The input images
        :return: list of FDTensor
        """
        return self._preprocessor.run(input_ims)

    @property
    def size(self):
        """
        Target size of the preprocessed image, a tuple of (width, height), default [640, 640]
        """
        return self._preprocessor.size

    @property
    def padding_value(self):
        """
        Padding value used when letterboxing the image, default [114.0, 114.0, 114.0]
        """
        # Padding value; its length should equal the number of channels.
        return self._preprocessor.padding_value

    @property
    def is_scale_up(self):
        """
        Whether the image may be scaled up during resizing; if False the image can only be zoomed out and the resize scale never exceeds 1.0, default True
        """
        return self._preprocessor.is_scale_up

    @property
    def is_mini_pad(self):
        """
        Whether to pad to the minimal rectangle whose height and width are multiples of stride, default False
        """
        return self._preprocessor.is_mini_pad

    @property
    def stride(self):
        """
        Stride used for padding, only effective in mini_pad mode, default 32
        """
        return self._preprocessor.stride

    @size.setter
    def size(self, wh):
        assert isinstance(
            wh, (list, tuple)
        ), "The value to set `size` must be type of tuple or list."
        assert (
            len(wh) == 2
        ), "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
            len(wh)
        )
        self._preprocessor.size = wh

    @padding_value.setter
    def padding_value(self, value):
        assert isinstance(
            value, list
        ), "The value to set `padding_value` must be type of list."
        self._preprocessor.padding_value = value

    @is_scale_up.setter
    def is_scale_up(self, value):
        assert isinstance(
            value, bool
        ), "The value to set `is_scale_up` must be type of bool."
        self._preprocessor.is_scale_up = value

    @is_mini_pad.setter
    def is_mini_pad(self, value):
        assert isinstance(
            value, bool
        ), "The value to set `is_mini_pad` must be type of bool."
        self._preprocessor.is_mini_pad = value

    @stride.setter
    def stride(self, value):
        assert isinstance(value, int), "The value to set `stride` must be type of int."
        self._preprocessor.stride = value

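# Example (a minimal sketch, kept as comments so importing this module has no
# side effects): configuring the letterbox preprocessing via the setters above.
# The standalone constructor and property names come from this file; the input
# `image` (an HWC, BGR numpy.ndarray) is an assumption for illustration.
#
#     preprocessor = YOLOv8Preprocessor()
#     preprocessor.size = [640, 640]               # (width, height)
#     preprocessor.padding_value = [114.0, 114.0, 114.0]
#     preprocessor.is_mini_pad = True              # pad to the nearest multiple of stride
#     preprocessor.stride = 32
#     tensors, ims_info = preprocessor.run([image])

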
class YOLOv8Postprocessor:
    def __init__(self):
        """Create a postprocessor for YOLOv8."""
        self._postprocessor = C.vision.detection.YOLOv8Postprocessor()

    def run(self, runtime_results, ims_info):
        """Postprocess the runtime results for YOLOv8.

        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
        :param ims_info: (list of dict) Records of input_shape and output_shape
        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
        """
        return self._postprocessor.run(runtime_results, ims_info)

    @property
    def conf_threshold(self):
        """
        Confidence threshold for postprocessing, default 0.25
        """
        return self._postprocessor.conf_threshold

    @property
    def nms_threshold(self):
        """
        NMS IoU threshold for postprocessing, default 0.5
        """
        return self._postprocessor.nms_threshold

    @property
    def multi_label(self):
        """
        Whether a box may carry multiple labels; set True for evaluation, default True
        """
        return self._postprocessor.multi_label

    @conf_threshold.setter
    def conf_threshold(self, conf_threshold):
        assert isinstance(
            conf_threshold, float
        ), "The value to set `conf_threshold` must be type of float."
        self._postprocessor.conf_threshold = conf_threshold

    @nms_threshold.setter
    def nms_threshold(self, nms_threshold):
        assert isinstance(
            nms_threshold, float
        ), "The value to set `nms_threshold` must be type of float."
        self._postprocessor.nms_threshold = nms_threshold

    @multi_label.setter
    def multi_label(self, value):
        assert isinstance(
            value, bool
        ), "The value to set `multi_label` must be type of bool."
        self._postprocessor.multi_label = value

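# Example (a minimal sketch, kept as comments): adjusting the postprocessing
# thresholds before decoding runtime outputs. `runtime_results` and `ims_info`
# would normally come from the runtime and the preprocessor respectively
# (assumed here for illustration).
#
#     postprocessor = YOLOv8Postprocessor()
#     postprocessor.conf_threshold = 0.3           # keep boxes with score >= 0.3
#     postprocessor.nms_threshold = 0.5            # IoU threshold for NMS
#     postprocessor.multi_label = False            # one label per box for deployment
#     results = postprocessor.run(runtime_results, ims_info)

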
class YOLOv8(UltraInferModel):
    def __init__(
        self,
        model_file,
        params_file="",
        runtime_option=None,
        model_format=ModelFormat.ONNX,
    ):
        """Load an exported YOLOv8 detection model.

        :param model_file: (str) Path of the model file, e.g. ./yolov8s.onnx
        :param params_file: (str) Path of the parameters file, e.g. yolov8/model.pdiparams; if model_format is ModelFormat.ONNX this parameter is ignored and can be left as an empty string
        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference with this model; if None, the default backend on CPU is used
        :param model_format: (ultra_infer.ModelFormat) Format of the loaded model
        """
        super(YOLOv8, self).__init__(runtime_option)
        self._model = C.vision.detection.YOLOv8(
            model_file, params_file, self._runtime_option, model_format
        )
        assert self.initialized, "YOLOv8 initialize failed."

    def predict(self, input_image):
        """Detect objects in an input image.

        :param input_image: (numpy.ndarray) The input image data, a 3-D array with layout HWC in BGR format
        :return: DetectionResult
        """
        return self._model.predict(input_image)

    def batch_predict(self, images):
        """Detect objects in a batch of input images.

        :param images: (list of numpy.ndarray) The input image list, each element a 3-D array with layout HWC in BGR format
        :return: list of DetectionResult
        """
        return self._model.batch_predict(images)

    @property
    def preprocessor(self):
        """Get the YOLOv8Preprocessor object of the loaded model.

        :return: YOLOv8Preprocessor
        """
        return self._model.preprocessor

    @property
    def postprocessor(self):
        """Get the YOLOv8Postprocessor object of the loaded model.

        :return: YOLOv8Postprocessor
        """
        return self._model.postprocessor
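

# Example (a minimal end-to-end sketch, kept as comments): the model path, the
# test image, and the use of OpenCV for decoding are assumptions, not part of
# this module. Note that this file uses relative imports, so in practice the
# class would be reached through the installed package (assumed to be
# ultra_infer.vision.detection.YOLOv8, based on the docstrings above).
#
#     import cv2
#
#     model = YOLOv8("yolov8s.onnx")               # ONNX export, params_file not needed
#     model.preprocessor.size = [640, 640]         # tune preprocessing via the exposed preprocessor
#     model.postprocessor.conf_threshold = 0.25    # tune postprocessing via the exposed postprocessor
#
#     im = cv2.imread("test.jpg")                  # HWC, BGR
#     result = model.predict(im)                   # single image -> DetectionResult
#     results = model.batch_predict([im, im])      # batched -> list of DetectionResult
#     print(result)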