co63oc 6 months ago
parent
commit
845c08c9f1
100 changed files with 188 additions and 188 deletions
  1. + 4 - 4  libs/ultra-infer/cmake/check.cmake
  2. + 4 - 4  libs/ultra-infer/cmake/cuda.cmake
  3. + 1 - 1  libs/ultra-infer/cmake/faiss.cmake
  4. + 1 - 1  libs/ultra-infer/cmake/utils.cmake
  5. + 1 - 1  libs/ultra-infer/cpack/debian_postinst.in
  6. + 1 - 1  libs/ultra-infer/cpack/rpm_postinst.in
  7. + 1 - 1  libs/ultra-infer/python/ultra_infer/pipeline/pptinypose/__init__.py
  8. + 1 - 1  libs/ultra-infer/python/ultra_infer/py_only/ts/processors.py
  9. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/classification/contrib/resnet.py
  10. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/common/manager.py
  11. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/common/processors.py
  12. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py
  13. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py
  14. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolor.py
  15. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5.py
  16. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py
  17. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py
  18. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov6.py
  19. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_ort.py
  20. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_trt.py
  21. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov8.py
  22. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolox.py
  23. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pipnet.py
  24. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/retinaface.py
  25. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/scrfd.py
  26. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov5face.py
  27. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/keypointdetection/pptinypose/__init__.py
  28. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/matting/contrib/rvm.py
  29. + 7 - 7  libs/ultra-infer/python/ultra_infer/vision/ocr/ppocr/__init__.py
  30. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/ocr/ppocr/utils/ser_vi_layoutxlm/operators.py
  31. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/segmentation/ppseg/__init__.py
  32. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/visualize/__init__.py
  33. + 5 - 5  libs/ultra-infer/ultra_infer/core/fd_tensor.cc
  34. + 3 - 3  libs/ultra-infer/ultra_infer/core/fd_tensor.h
  35. + 1 - 1  libs/ultra-infer/ultra_infer/core/float16.h
  36. + 1 - 1  libs/ultra-infer/ultra_infer/function/clip.h
  37. + 1 - 1  libs/ultra-infer/ultra_infer/function/concat.h
  38. + 1 - 1  libs/ultra-infer/ultra_infer/function/cumprod.h
  39. + 5 - 5  libs/ultra-infer/ultra_infer/function/elementwise.h
  40. + 1 - 1  libs/ultra-infer/ultra_infer/function/elementwise_functor.h
  41. + 1 - 1  libs/ultra-infer/ultra_infer/function/pad.h
  42. + 1 - 1  libs/ultra-infer/ultra_infer/function/reduce.cc
  43. + 9 - 9  libs/ultra-infer/ultra_infer/function/reduce.h
  44. + 1 - 1  libs/ultra-infer/ultra_infer/function/softmax.h
  45. + 1 - 1  libs/ultra-infer/ultra_infer/function/transpose.h
  46. + 1 - 1  libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h
  47. + 1 - 1  libs/ultra-infer/ultra_infer/pybind/fd_tensor.cc
  48. + 1 - 1  libs/ultra-infer/ultra_infer/pybind/main.cc.in
  49. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/backend.h
  50. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/lite/lite_backend.cc
  51. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/om/om_backend.cc
  52. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/ort/ort_backend.cc
  53. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/ort/ort_backend.h
  54. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/grid_sample_3d.cu
  55. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/iou3d_nms.cc
  56. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/iou3d_nms_kernel.cu
  57. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/paddle/option.h
  58. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/paddle/paddle_backend.cc
  59. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/poros/common/compile.h
  60. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/rknpu2/option.h
  61. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.cc
  62. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.h
  63. + 3 - 3  libs/ultra-infer/ultra_infer/runtime/runtime.cc
  64. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/runtime.h
  65. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet.cc
  66. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet_pybind.cc
  67. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/yolov5cls.h
  68. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/ppcls/model.h
  69. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.h
  70. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec.h
  71. + 1 - 1  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.h
  72. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/cast.h
  73. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/center_crop.h
  74. + 4 - 4  libs/ultra-infer/ultra_infer/vision/common/processors/color_space_convert.h
  75. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/convert.h
  76. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/convert_and_permute.h
  77. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/crop.h
  78. + 3 - 3  libs/ultra-infer/ultra_infer/vision/common/processors/limit_by_stride.h
  79. + 2 - 2  libs/ultra-infer/ultra_infer/vision/common/processors/limit_short.h
  80. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/manager.h
  81. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/mat_batch.h
  82. + 2 - 2  libs/ultra-infer/ultra_infer/vision/common/processors/normalize.h
  83. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/normalize_and_permute.h
  84. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/proc_lib.cc
  85. + 4 - 4  libs/ultra-infer/ultra_infer/vision/common/processors/resize.h
  86. + 2 - 2  libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h
  87. + 2 - 2  libs/ultra-infer/ultra_infer/vision/common/result.h
  88. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/fastestdet.h
  89. + 2 - 2  libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.cc
  90. + 3 - 3  libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h
  91. + 2 - 2  libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/preprocessor.h
  92. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/rkyolo.h
  93. + 4 - 4  libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h
  94. + 3 - 3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h
  95. + 3 - 3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/preprocessor.h
  96. + 3 - 3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h
  97. + 7 - 7  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h
  98. + 3 - 3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/preprocessor.h
  99. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/yolov5seg.h
  100. + 4 - 4  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h

+ 4 - 4
libs/ultra-infer/cmake/check.cmake

@@ -26,20 +26,20 @@ if(IOS)
    message(FATAL_ERROR "Not support OpenVINO backend for IOS now. Please set ENABLE_OPENVINO_BACKEND=OFF.")
  endif()
  if(ENABLE_TRT_BACKEND)
-    message(FATAL_ERROR "Not support TensorRT backend for Andorid/IOS now. Please set ENABLE_TRT_BACKEND=OFF.")
+    message(FATAL_ERROR "Not support TensorRT backend for Android/IOS now. Please set ENABLE_TRT_BACKEND=OFF.")
  endif()
endif()

if(WITH_GPU)
  if(APPLE)
-    message(FATAL_ERROR "Cannot enable GPU while compling in Mac OSX.")
+    message(FATAL_ERROR "Cannot enable GPU while compiling in Mac OSX.")
  elseif(IOS)
-    message(FATAL_ERROR "Cannot enable GPU while compling in IOS.")
+    message(FATAL_ERROR "Cannot enable GPU while compiling in IOS.")
  endif()
endif()

if(WITH_OPENCL)
  if(NOT ENABLE_LITE_BACKEND)
-    message(FATAL_ERROR "Cannot enable OpenCL while compling unless in Paddle Lite backend is enbaled.")
+    message(FATAL_ERROR "Cannot enable OpenCL while compiling unless in Paddle Lite backend is enabled.")
  endif()
endif()

+ 4 - 4
libs/ultra-infer/cmake/cuda.cmake

@@ -10,7 +10,7 @@ if(BUILD_ON_JETSON)
  set(fd_known_gpu_archs "53 62 72")
  set(fd_known_gpu_archs10 "53 62 72")
else()
-  message("Using New Release Strategy - All Arches Packge")
+  message("Using New Release Strategy - All Arches Package")
  set(fd_known_gpu_archs "35 50 52 60 61 70 75 80 86")
  set(fd_known_gpu_archs10 "35 50 52 60 61 70 75")
  set(fd_known_gpu_archs11 "50 60 61 70 75 80")
@@ -58,7 +58,7 @@ function(detect_installed_gpus out_variable)
      set(CUDA_gpu_detect_output
          ${nvcc_out}
          CACHE INTERNAL
-                "Returned GPU architetures from detect_installed_gpus tool"
+                "Returned GPU architectures from detect_installed_gpus tool"
                FORCE)
    endif()
  endif()
@@ -98,7 +98,7 @@ function(select_nvcc_arch_flags out_variable)
  # set CUDA_ARCH_NAME strings (so it will be seen as dropbox in CMake-Gui)
  set(CUDA_ARCH_NAME
      ${archs_name_default}
-      CACHE STRING "Select target NVIDIA GPU achitecture.")
+      CACHE STRING "Select target NVIDIA GPU architecture.")
  set_property(CACHE CUDA_ARCH_NAME PROPERTY STRINGS "" ${archs_names})
  mark_as_advanced(CUDA_ARCH_NAME)

@@ -252,7 +252,7 @@ else()
  message(WARNING "Detected custom CMAKE_CUDA_STANDARD is using: ${CMAKE_CUDA_STANDARD}")
endif()

-# (Note) For windows, if delete /W[1-4], /W1 will be added defaultly and conflic with -w
+# (Note) For windows, if delete /W[1-4], /W1 will be added defaultly and conflict with -w
# So replace /W[1-4] with /W0
if(WIN32)
  string(REGEX REPLACE "/W[1-4]" " /W0 " CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}")

+ 1 - 1
libs/ultra-infer/cmake/faiss.cmake

@@ -113,7 +113,7 @@ else() # Linux
  list(APPEND FAISS_LIBRARIES ${LAPACK_LIBRARIES})
endif()

-# Add OpenMP (REQUIRED), OpenMP must be avaliable.
+# Add OpenMP (REQUIRED), OpenMP must be available.
find_package(OpenMP REQUIRED)
list(APPEND FAISS_LIBRARIES OpenMP::OpenMP_CXX)


+ 1 - 1
libs/ultra-infer/cmake/utils.cmake

@@ -152,7 +152,7 @@ function(bundle_static_library tgt_name bundled_tgt_name fake_target)

  list(REMOVE_DUPLICATES static_libs)
  list(REMOVE_ITEM static_libs ${REDUNDANT_STATIC_LIBS})
-  message(STATUS "WITH_STATIC_LIB=${WITH_STATIC_LIB}, Found all needed static libs from dependecy tree: ${static_libs}")
+  message(STATUS "WITH_STATIC_LIB=${WITH_STATIC_LIB}, Found all needed static libs from dependency tree: ${static_libs}")
  message(STATUS "Exclude some redundant static libs: ${REDUNDANT_STATIC_LIBS}")

  set(bundled_tgt_full_name

+ 1 - 1
libs/ultra-infer/cpack/debian_postinst.in

@@ -16,7 +16,7 @@ case "$1" in
            LIBS_DIRECOTRIES[${#LIBS_DIRECOTRIES[@]}]=${SO_FILE%/*}
        done

-        # Remove the dumplicate directories
+        # Remove the duplicate directories
        LIBS_DIRECOTRIES=($(awk -v RS=' ' '!a[$1]++' <<< ${LIBS_DIRECOTRIES[@]}))

        IMPORT_PATH=""

+ 1 - 1
libs/ultra-infer/cpack/rpm_postinst.in

@@ -14,7 +14,7 @@ for SO_FILE in $ALL_SO_FILES;do
    LIBS_DIRECOTRIES[${#LIBS_DIRECOTRIES[@]}]=${SO_FILE%/*}
done

-# Remove the dumplicate directories
+# Remove the duplicate directories
LIBS_DIRECOTRIES=($(awk -v RS=' ' '!a[$1]++' <<< ${LIBS_DIRECOTRIES[@]}))

IMPORT_PATH=""

+ 1 - 1
libs/ultra-infer/python/ultra_infer/pipeline/pptinypose/__init__.py

@@ -40,7 +40,7 @@ class PPTinyPose(object):

    @property
    def detection_model_score_threshold(self):
-        """Atrribute of PPTinyPose pipeline model. Stating the score threshold for detectin model to filter bbox before inputting pptinypose model
+        """Attribute of PPTinyPose pipeline model. Stating the score threshold for detectin model to filter bbox before inputting pptinypose model

        :return: value of detection_model_score_threshold(float)
        """

+ 1 - 1
libs/ultra-infer/python/ultra_infer/py_only/ts/processors.py

@@ -231,7 +231,7 @@ def _load_from_dataframe(
    fillna_window_size: int = 10,
    **kwargs,
):
-    dfs = []  # seperate multiple group
+    dfs = []  # separate multiple group
    if group_id is not None:
        group_unique = df[group_id].unique()
        for column in group_unique:

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/classification/contrib/resnet.py

@@ -66,14 +66,14 @@ class ResNet(UltraInferModel):
    @property
    def mean_vals(self):
        """
-        Returns the mean value of normlization, default mean_vals = [0.485f, 0.456f, 0.406f];
+        Returns the mean value of normalization, default mean_vals = [0.485f, 0.456f, 0.406f];
        """
        return self._model.mean_vals

    @property
    def std_vals(self):
        """
-        Returns the std value of normlization, default std_vals = [0.229f, 0.224f, 0.225f];
+        Returns the std value of normalization, default std_vals = [0.229f, 0.224f, 0.225f];
        """
        return self._model.std_vals


+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/common/manager.py

@@ -32,7 +32,7 @@ class ProcessorManager:
    def use_cuda(self, enable_cv_cuda=False, gpu_id=-1):
        """Use CUDA processors

-        :param: enable_cv_cuda: Ture: use CV-CUDA, False: use CUDA only
+        :param: enable_cv_cuda: True: use CV-CUDA, False: use CUDA only
        :param: gpu_id: GPU device id
        """
        return self._manager.use_cuda(enable_cv_cuda, gpu_id)
@@ -49,7 +49,7 @@ class PyProcessorManager(ABC):
    def use_cuda(self, enable_cv_cuda=False, gpu_id=-1):
        """Use CUDA processors

-        :param: enable_cv_cuda: Ture: use CV-CUDA, False: use CUDA only
+        :param: enable_cv_cuda: True: use CV-CUDA, False: use CUDA only
        :param: gpu_id: GPU device id
        """
        return self._manager.use_cuda(enable_cv_cuda, gpu_id)

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/common/processors.py

@@ -67,7 +67,7 @@ class Pad(Processor):

class NormalizeAndPermute(Processor):
    def __init__(self, mean=[], std=[], is_scale=True, min=[], max=[], swap_rb=False):
-        """Creae a Normalize and a Permute operation with the given parameters.
+        """Create a Normalize and a Permute operation with the given parameters.

        :param mean: A list containing the mean of each channel
        :param std: A list containing the standard deviation of each channel
@@ -100,7 +100,7 @@ class HWC2CHW(Processor):

class Normalize(Processor):
    def __init__(self, mean, std, is_scale=True, min=[], max=[], swap_rb=False):
-        """Creat a new normalize opereator with given paremeters.
+        """Creat a new normalize opereator with given parameters.

        :param mean: A list containing the mean of each channel
        :param std: A list containing the standard deviation of each channel

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py

@@ -79,7 +79,7 @@ class NanoDetPlus(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS, default 4096
+        # for offsetting the boxes by classes when using NMS, default 4096
        return self._model.max_wh

    @property

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py

@@ -75,7 +75,7 @@ class ScaledYOLOv4(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property
@@ -90,7 +90,7 @@ class ScaledYOLOv4(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS
+        # for offsetting the boxes by classes when using NMS
        return self._model.max_wh

    @size.setter

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolor.py

@@ -74,7 +74,7 @@ class YOLOR(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property
@@ -89,7 +89,7 @@ class YOLOR(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS
+        # for offsetting the boxes by classes when using NMS
        return self._model.max_wh

    @size.setter

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5.py

@@ -56,7 +56,7 @@ class YOLOv5Preprocessor:
    @property
    def is_mini_pad(self):
        """
-        is_mini_pad for preprocessing, pad to the minimum rectange which height and width is times of stride, default false
+        is_mini_pad for preprocessing, pad to the minimum rectangle which height and width is times of stride, default false
        """
        return self._preprocessor.is_mini_pad


+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py

@@ -74,7 +74,7 @@ class YOLOv5Lite(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property
@@ -89,7 +89,7 @@ class YOLOv5Lite(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS
+        # for offsetting the boxes by classes when using NMS
        return self._model.max_wh

    @property

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py

@@ -56,7 +56,7 @@ class YOLOv5SegPreprocessor:
    @property
    def is_mini_pad(self):
        """
-        is_mini_pad for preprocessing, pad to the minimum rectange which height and width is times of stride, default false
+        is_mini_pad for preprocessing, pad to the minimum rectangle which height and width is times of stride, default false
        """
        return self._preprocessor.is_mini_pad


+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov6.py

@@ -74,7 +74,7 @@ class YOLOv6(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property
@@ -89,7 +89,7 @@ class YOLOv6(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS
+        # for offsetting the boxes by classes when using NMS
        return self._model.max_wh

    @size.setter

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_ort.py

@@ -73,7 +73,7 @@ class YOLOv7End2EndORT(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_trt.py

@@ -73,7 +73,7 @@ class YOLOv7End2EndTRT(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov8.py

@@ -56,7 +56,7 @@ class YOLOv8Preprocessor:
    @property
    def is_mini_pad(self):
        """
-        is_mini_pad for preprocessing, pad to the minimum rectange which height and width is times of stride, default false
+        is_mini_pad for preprocessing, pad to the minimum rectangle which height and width is times of stride, default false
        """
        return self._preprocessor.is_mini_pad


+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolox.py

@@ -73,7 +73,7 @@ class YOLOX(UltraInferModel):
        whether the model_file was exported with decode module.
        The official YOLOX/tools/export_onnx.py script will export ONNX file without decode module.
        Please set it 'true' manually if the model file was exported with decode module.
-        Defalut False.
+        Default False.
        """
        return self._model.is_decode_exported

@@ -86,7 +86,7 @@ class YOLOX(UltraInferModel):

    @property
    def max_wh(self):
-        # for offseting the boxes by classes when using NMS
+        # for offsetting the boxes by classes when using NMS
        return self._model.max_wh

    @size.setter

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pipnet.py

@@ -63,14 +63,14 @@ class PIPNet(UltraInferModel):
    @property
    def mean_vals(self):
        """
-        Returns the mean value of normlization, default mean_vals = [0.485f, 0.456f, 0.406f];
+        Returns the mean value of normalization, default mean_vals = [0.485f, 0.456f, 0.406f];
        """
        return self._model.mean_vals

    @property
    def std_vals(self):
        """
-        Returns the std value of normlization, default std_vals = [0.229f, 0.224f, 0.225f];
+        Returns the std value of normalization, default std_vals = [0.229f, 0.224f, 0.225f];
        """
        return self._model.std_vals


+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/retinaface.py

@@ -109,7 +109,7 @@ class RetinaFace(UltraInferModel):
        ), "The value to set `variance` must be type of tuple or list."
        assert (
            len(value) == 2
-        ), "The value to set `variance` must contatins 2 elements".format(len(value))
+        ), "The value to set `variance` must contains 2 elements".format(len(value))
        self._model.variance = value

    @downsample_strides.setter

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/scrfd.py

@@ -86,7 +86,7 @@ class SCRFD(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov5face.py

@@ -74,7 +74,7 @@ class YOLOv5Face(UltraInferModel):

    @property
    def is_mini_pad(self):
-        # only pad to the minimum rectange which height and width is times of stride
+        # only pad to the minimum rectangle which height and width is times of stride
        return self._model.is_mini_pad

    @property

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/keypointdetection/pptinypose/__init__.py

@@ -60,7 +60,7 @@ class PPTinyPose(UltraInferModel):

    @property
    def use_dark(self):
-        """Atrribute of PPTinyPose model. Stating whether using Distribution-Aware Coordinate Representation for Human Pose Estimation(DARK for short) in postprocess, default is True
+        """Attribute of PPTinyPose model. Stating whether using Distribution-Aware Coordinate Representation for Human Pose Estimation(DARK for short) in postprocess, default is True

        :return: value of use_dark(bool)
        """

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/matting/contrib/rvm.py

@@ -58,14 +58,14 @@ class RobustVideoMatting(UltraInferModel):
    @property
    def video_mode(self):
        """
-        Whether to open the video mode, if there are some irrelevant pictures, set it to fasle, the default is true
+        Whether to open the video mode, if there are some irrelevant pictures, set it to false, the default is true
        """
        return self._model.video_mode

    @property
    def swap_rb(self):
        """
-        Whether convert to RGB, Set to false if you have converted YUV format images to RGB outside the model, dafault true
+        Whether convert to RGB, Set to false if you have converted YUV format images to RGB outside the model, default true
        """
        return self._model.swap_rb


+ 7 - 7
libs/ultra-infer/python/ultra_infer/vision/ocr/ppocr/__init__.py

@@ -1295,7 +1295,7 @@ class StructureV2Layout(UltraInferModel):

class PPOCRv4(UltraInferModel):
    def __init__(self, det_model=None, cls_model=None, rec_model=None):
-        """Consruct a pipeline with text detector, direction classifier and text recognizer models
+        """Construct a pipeline with text detector, direction classifier and text recognizer models

        :param det_model: (UltraInferModel) The detection model object created by ultra_infer.vision.ocr.DBDetector.
        :param cls_model: (UltraInferModel) The classification model object created by ultra_infer.vision.ocr.Classifier.
@@ -1379,7 +1379,7 @@ class PPOCRSystemv4(PPOCRv4):

class PPOCRv3(UltraInferModel):
    def __init__(self, det_model=None, cls_model=None, rec_model=None):
-        """Consruct a pipeline with text detector, direction classifier and text recognizer models
+        """Construct a pipeline with text detector, direction classifier and text recognizer models

        :param det_model: (UltraInferModel) The detection model object created by ultra_infer.vision.ocr.DBDetector.
        :param cls_model: (UltraInferModel) The classification model object created by ultra_infer.vision.ocr.Classifier.
@@ -1458,7 +1458,7 @@ class PPOCRSystemv3(PPOCRv3):

class PPOCRv2(UltraInferModel):
    def __init__(self, det_model=None, cls_model=None, rec_model=None):
-        """Consruct a pipeline with text detector, direction classifier and text recognizer models
+        """Construct a pipeline with text detector, direction classifier and text recognizer models

        :param det_model: (UltraInferModel) The detection model object created by ultra_infer.vision.ocr.DBDetector.
        :param cls_model: (UltraInferModel) The classification model object created by ultra_infer.vision.ocr.Classifier.
@@ -1539,7 +1539,7 @@ class PPOCRSystemv2(PPOCRv2):

class PPStructureV2Table(UltraInferModel):
    def __init__(self, det_model=None, rec_model=None, table_model=None):
-        """Consruct a pipeline with text detector, text recognizer and table recognizer models
+        """Construct a pipeline with text detector, text recognizer and table recognizer models

        :param det_model: (UltraInferModel) The detection model object created by ultra_infer.vision.ocr.DBDetector.
        :param rec_model: (UltraInferModel) The recognition model object created by ultra_infer.vision.ocr.Recognizer.
@@ -1690,7 +1690,7 @@ class StructureV2SERViLayoutXLMModelPostprocessor:

    def run(self, preds, batch=None, *args, **kwargs):
        """Run postprocess of  Ser-Vi-LayoutXLM model.
-        :param: preds: (list) results of infering
+        :param: preds: (list) results of inferring
        """
        return self.postprocessor_op(preds, batch, *args, **kwargs)

@@ -1735,7 +1735,7 @@ class StructureV2SERViLayoutXLMModel(UltraInferModel):
        self.input_name_3 = self._model.get_input_info(3).name

    def predict(self, image):
-        assert isinstance(image, np.ndarray), "predict recives numpy.ndarray(BGR)"
+        assert isinstance(image, np.ndarray), "predict receives numpy.ndarray(BGR)"

        data = self.preprocessor.run(image)
        infer_input = {
@@ -1757,7 +1757,7 @@ class StructureV2SERViLayoutXLMModel(UltraInferModel):
    def batch_predict(self, image_list):
        assert isinstance(image_list, list) and isinstance(
            image_list[0], np.ndarray
-        ), "batch_predict recives list of numpy.ndarray(BGR)"
+        ), "batch_predict receives list of numpy.ndarray(BGR)"

        # reading and preprocessing images
        datas = None

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/ocr/ppocr/utils/ser_vi_layoutxlm/operators.py

@@ -53,7 +53,7 @@ class Resize(object):


class NormalizeImage(object):
-    """normalize image such as substract mean, divide std"""
+    """normalize image such as subtract mean, divide std"""

    def __init__(self, scale=None, mean=None, std=None, order="chw", **kwargs):
        if isinstance(scale, str):

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/segmentation/ppseg/__init__.py

@@ -121,7 +121,7 @@ class PaddleSegPreprocessor(ProcessorManager):

    @property
    def is_vertical_screen(self):
-        """Atrribute of PP-HumanSeg model. Stating Whether the input image is vertical image(height > width), default value is False
+        """Attribute of PP-HumanSeg model. Stating Whether the input image is vertical image(height > width), default value is False

        :return: value of is_vertical_screen(bool)
        """
@@ -158,7 +158,7 @@ class PaddleSegPostprocessor:

    @property
    def apply_softmax(self):
-        """Atrribute of PaddleSeg model. Stating Whether applying softmax operator in the postprocess, default value is False
+        """Attribute of PaddleSeg model. Stating Whether applying softmax operator in the postprocess, default value is False

        :return: value of apply_softmax(bool)
        """
@@ -177,7 +177,7 @@ class PaddleSegPostprocessor:

    @property
    def store_score_map(self):
-        """Atrribute of PaddleSeg model. Stating Whether storing score map in the SegmentationResult, default value is False
+        """Attribute of PaddleSeg model. Stating Whether storing score map in the SegmentationResult, default value is False

        :return: value of store_score_map(bool)
        """

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/visualize/__init__.py

@@ -135,7 +135,7 @@ def vis_matting(

    :param im_data: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
    :param matting_result: the result produced by model
-    :param transparent_background: whether visulizing matting result with transparent background
+    :param transparent_background: whether visualizing matting result with transparent background
    :param transparent_threshold: since the alpha value in MattringResult is a float between [0, 1], transparent_threshold is used to filter background pixel
    :param remove_small_connected_area: (bool) if remove_small_connected_area==True, the visualized result will not include the small connected areas
    :return: (numpy.ndarray) image with visualized results

+ 5 - 5
libs/ultra-infer/ultra_infer/core/fd_tensor.cc

@@ -73,7 +73,7 @@ const void *FDTensor::CpuData() const {
#else
    FDASSERT(false,
             "The UltraInfer didn't compile under -DWITH_GPU=ON, so this is "
-             "an unexpected problem happend.");
+             "an unexpected problem happened.");
#endif
  }
  return Data();
@@ -259,7 +259,7 @@ bool FDTensor::ReallocFn(size_t nbytes) {
#else
    FDASSERT(false, "The UltraInfer FDTensor allocator didn't compile under "
                    "-DWITH_GPU=ON,"
-                    "so this is an unexpected problem happend.");
+                    "so this is an unexpected problem happened.");
#endif
  } else {
    if (is_pinned_memory) {
@@ -276,7 +276,7 @@ bool FDTensor::ReallocFn(size_t nbytes) {
#else
      FDASSERT(false, "The UltraInfer FDTensor allocator didn't compile under "
                      "-DWITH_GPU=ON,"
-                      "so this is an unexpected problem happend.");
+                      "so this is an unexpected problem happened.");
#endif
    }
    buffer_ = realloc(buffer_, nbytes);
@@ -319,7 +319,7 @@ void FDTensor::CopyBuffer(void *dst, const void *src, size_t nbytes,
    FDASSERT(false,
             "The UltraInfer didn't compile under -DWITH_GPU=ON, so copying "
             "gpu buffer is "
-             "an unexpected problem happend.");
+             "an unexpected problem happened.");
#endif
  } else {
    if (is_pinned_memory) {
@@ -330,7 +330,7 @@ void FDTensor::CopyBuffer(void *dst, const void *src, size_t nbytes,
      FDASSERT(false,
               "The UltraInfer didn't compile under -DWITH_GPU=ON, so copying "
               "gpu buffer is "
-               "an unexpected problem happend.");
+               "an unexpected problem happened.");
#endif
    } else {
      std::memcpy(dst, src, nbytes);

+ 3 - 3
libs/ultra-infer/ultra_infer/core/fd_tensor.h

@@ -25,7 +25,7 @@

namespace ultra_infer {

-/*! @brief FDTensor object used to represend data matrix
+/*! @brief FDTensor object used to represent data matrix
 *
 */
struct ULTRAINFER_DECL FDTensor {
@@ -125,7 +125,7 @@ struct ULTRAINFER_DECL FDTensor {
  // The internal data will be on CPU
  // Some times, the external data is on the GPU, and we are going to use
  // GPU to inference the model
-  // so we can skip data transfer, which may improve the efficience
+  // so we can skip data transfer, which may improve the efficiency
  Device device = Device::CPU;
  // By default the device id of FDTensor is -1, which means this value is
  // invalid, and FDTensor is using the same device id as Runtime.
@@ -153,7 +153,7 @@ struct ULTRAINFER_DECL FDTensor {
  const void *Data() const;

  // Use this data to get the tensor data to process
-  // Since the most senario is process data in CPU
+  // Since the most scenario is process data in CPU
  // this function will return a pointer to cpu memory
  // buffer.
  // If the original data is on other device, the data

+ 1 - 1
libs/ultra-infer/ultra_infer/core/float16.h

@@ -151,7 +151,7 @@ public:
    return *this;
  }

-// Conversion opertors
+// Conversion operators
#ifdef FD_WITH_NATIVE_FP16
  HOSTDEVICE inline explicit operator float16_t() const {
    return *reinterpret_cast<const float16_t *>(this);

+ 1 - 1
libs/ultra-infer/ultra_infer/function/clip.h

@@ -23,7 +23,7 @@ namespace function {
   Support float32, float64, int32, int64
    @param x The input tensor.
    @param min The lower bound
-    @param max The uppper bound
+    @param max The upper bound
    @param out The output tensor which stores the result.
*/
ULTRAINFER_DECL void Clip(const FDTensor &x, double min, double max,

+ 1 - 1
libs/ultra-infer/ultra_infer/function/concat.h

@@ -19,7 +19,7 @@
namespace ultra_infer {
namespace function {

-/** Excute the concatenate operation for input FDTensor along given axis.
+/** Execute the concatenate operation for input FDTensor along given axis.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axis Axis which will be concatenated.

+ 1 - 1
libs/ultra-infer/ultra_infer/function/cumprod.h

@@ -19,7 +19,7 @@
namespace ultra_infer {
namespace function {

-/** Excute the concatenate operation for input FDTensor along given axis.
+/** Execute the concatenate operation for input FDTensor along given axis.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axisi Axis which will be concatenated.

+ 5 - 5
libs/ultra-infer/ultra_infer/function/elementwise.h

@@ -21,14 +21,14 @@ namespace ultra_infer {

namespace function {

-/** Excute the add operation for input FDTensors. *out = x + y.
+/** Execute the add operation for input FDTensors. *out = x + y.
    @param x The input tensor.
    @param y The input tensor.
    @param out The output tensor which stores the result.
*/
ULTRAINFER_DECL void Add(const FDTensor &x, const FDTensor &y, FDTensor *out);

-/** Excute the subtract operation for input FDTensors.  *out = x - y.
+/** Execute the subtract operation for input FDTensors.  *out = x - y.
    @param x The input tensor.
    @param y The input tensor.
    @param out The output tensor which stores the result.
@@ -36,7 +36,7 @@ ULTRAINFER_DECL void Add(const FDTensor &x, const FDTensor &y, FDTensor *out);
ULTRAINFER_DECL void Subtract(const FDTensor &x, const FDTensor &y,
                              FDTensor *out);

-/** Excute the multiply operation for input FDTensors.  *out = x * y.
+/** Execute the multiply operation for input FDTensors.  *out = x * y.
    @param x The input tensor.
    @param y The input tensor.
    @param out The output tensor which stores the result.
@@ -44,7 +44,7 @@ ULTRAINFER_DECL void Subtract(const FDTensor &x, const FDTensor &y,
ULTRAINFER_DECL void Multiply(const FDTensor &x, const FDTensor &y,
                              FDTensor *out);

-/** Excute the divide operation for input FDTensors.  *out = x / y.
+/** Execute the divide operation for input FDTensors.  *out = x / y.
    @param x The input tensor.
    @param y The input tensor.
    @param out The output tensor which stores the result.
@@ -52,7 +52,7 @@ ULTRAINFER_DECL void Multiply(const FDTensor &x, const FDTensor &y,
ULTRAINFER_DECL void Divide(const FDTensor &x, const FDTensor &y,
                            FDTensor *out);

-/** Excute the maximum operation for input FDTensors.  *out = max(x, y).
+/** Execute the maximum operation for input FDTensors.  *out = max(x, y).
    @param x The input tensor.
    @param y The input tensor.
    @param out The output tensor which stores the result.

+ 1 - 1
libs/ultra-infer/ultra_infer/function/elementwise_functor.h

@@ -112,7 +112,7 @@ template <typename T>
struct DivideFunctor<
    T, typename std::enable_if<std::is_integral<T>::value>::type> {
  inline T operator()(const T a, const T b) const {
-    // For int32/int64, need to check whether the divison is zero.
+    // For int32/int64, need to check whether the division is zero.
    FDASSERT(b != 0, DIV_ERROR_INFO);
    return a / b;
  }

+ 1 - 1
libs/ultra-infer/ultra_infer/function/pad.h

@@ -18,7 +18,7 @@

namespace ultra_infer {
namespace function {
-/** Excute the pad operation for input FDTensor along given dims.
+/** Execute the pad operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param pads The size of padding for each dimension, for 3-D tensor, the pads

+ 1 - 1
libs/ultra-infer/ultra_infer/function/reduce.cc

@@ -260,7 +260,7 @@ template <typename T, typename Tout, ArgMinMaxType EnumArgMinMaxValue>
void ArgMinMaxKernel(const FDTensor &x, FDTensor *out, int64_t axis,
                     bool keepdims, bool flatten) {
  bool new_keepdims = keepdims | flatten;
-  // if flatten, will construct the new dims for the cacluate
+  // if flatten, will construct the new dims for the calculate
  std::vector<int64_t> x_dims;
  int new_axis = axis;
  if (flatten) {

+ 9 - 9
libs/ultra-infer/ultra_infer/function/reduce.h

@@ -18,7 +18,7 @@
 
 
 namespace ultra_infer {
 namespace ultra_infer {
 namespace function {
 namespace function {
-/** Excute the maximum operation for input FDTensor along given dims.
+/** Execute the maximum operation for input FDTensor along given dims.
     @param x The input tensor.
     @param x The input tensor.
     @param out The output tensor which stores the result.
     @param out The output tensor which stores the result.
     @param dims The vector of axis which will be reduced.
     @param dims The vector of axis which will be reduced.
@@ -29,7 +29,7 @@ ULTRAINFER_DECL void Max(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
                          bool keep_dim = false, bool reduce_all = false);
 
 
-/** Excute the minimum operation for input FDTensor along given dims.
+/** Execute the minimum operation for input FDTensor along given dims.
     @param x The input tensor.
     @param x The input tensor.
     @param out The output tensor which stores the result.
     @param out The output tensor which stores the result.
     @param dims The vector of axis which will be reduced.
     @param dims The vector of axis which will be reduced.
@@ -40,7 +40,7 @@ ULTRAINFER_DECL void Min(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
                          bool keep_dim = false, bool reduce_all = false);
 
 
-/** Excute the sum operation for input FDTensor along given dims.
+/** Execute the sum operation for input FDTensor along given dims.
     @param x The input tensor.
     @param x The input tensor.
     @param out The output tensor which stores the result.
     @param out The output tensor which stores the result.
     @param dims The vector of axis which will be reduced.
     @param dims The vector of axis which will be reduced.
@@ -51,7 +51,7 @@ ULTRAINFER_DECL void Sum(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
                          bool keep_dim = false, bool reduce_all = false);
 
 
-/** Excute the all operation for input FDTensor along given dims.
+/** Execute the all operation for input FDTensor along given dims.
     @param x The input tensor.
     @param x The input tensor.
     @param out The output tensor which stores the result.
     @param out The output tensor which stores the result.
     @param dims The vector of axis which will be reduced.
     @param dims The vector of axis which will be reduced.
@@ -62,7 +62,7 @@ ULTRAINFER_DECL void All(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
                          bool keep_dim = false, bool reduce_all = false);
 
 
-/** Excute the any operation for input FDTensor along given dims.
+/** Execute the any operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param dims The vector of axis which will be reduced.
@@ -73,7 +73,7 @@ ULTRAINFER_DECL void Any(const FDTensor &x, FDTensor *out,
                         const std::vector<int64_t> &dims,
                         bool keep_dim = false, bool reduce_all = false);
 
-/** Excute the mean operation for input FDTensor along given dims.
+/** Execute the mean operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param dims The vector of axis which will be reduced.
@@ -84,7 +84,7 @@ ULTRAINFER_DECL void Mean(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
 
-/** Excute the product operation for input FDTensor along given dims.
+/** Execute the product operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param dims The vector of axis which will be reduced.
@@ -95,7 +95,7 @@ ULTRAINFER_DECL void Prod(const FDTensor &x, FDTensor *out,
                          const std::vector<int64_t> &dims,
                          bool keep_dim = false, bool reduce_all = false);
 
-/** Excute the argmax operation for input FDTensor along given dims.
+/** Execute the argmax operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axis The axis which will be reduced.
@@ -109,7 +109,7 @@ ULTRAINFER_DECL void ArgMax(const FDTensor &x, FDTensor *out, int64_t axis,
                            FDDataType output_dtype = FDDataType::INT64,
                            bool keep_dim = false, bool flatten = false);
 
-/** Excute the argmin operation for input FDTensor along given dims.
+/** Execute the argmin operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axis The axis which will be reduced.

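A minimal usage sketch of the reduce API above (hedged: only the signatures shown in this hunk are relied on; the FDTensor `x` is assumed to be created and filled elsewhere):

ultra_infer::FDTensor x;    // e.g. shape [2, 3], filled elsewhere
ultra_infer::FDTensor out;
// Reduce along axis 1 and keep the reduced dimension: out has shape [2, 1].
ultra_infer::function::Sum(x, &out, {1}, /*keep_dim=*/true);
// reduce_all = true ignores `dims` and collapses every axis to one value.
ultra_infer::function::Max(x, &out, {}, /*keep_dim=*/false, /*reduce_all=*/true);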
+ 1 - 1
libs/ultra-infer/ultra_infer/function/softmax.h

@@ -18,7 +18,7 @@

 namespace ultra_infer {
 namespace function {
-/** Excute the softmax operation for input FDTensor along given dims.
+/** Execute the softmax operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axis The axis to be computed softmax value.

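Softmax follows the same call pattern; a hedged sketch (`logits` and `probs` are illustrative FDTensor objects, not from this diff):

// Normalize logits of shape [batch, num_classes] along the class axis.
ultra_infer::function::Softmax(logits, &probs, /*axis=*/1);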
+ 1 - 1
libs/ultra-infer/ultra_infer/function/transpose.h

@@ -22,7 +22,7 @@ namespace ultra_infer {
 *
 */
 namespace function {
-/** Excute the transpose operation for input FDTensor along given dims.
+/** Execute the transpose operation for input FDTensor along given dims.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param dims The vector of axis which the input tensor will transpose.

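And the matching sketch for Transpose (again, only the parameters documented above are assumed; `chw` and `hwc` are illustrative):

// dims lists the input axes in their new order: CHW -> HWC.
ultra_infer::function::Transpose(chw, &hwc, {1, 2, 0});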
+ 1 - 1
libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h

@@ -42,7 +42,7 @@ public:
  /** \brief Predict the keypoint detection result for an input image
   *
   * \param[in] img The input image data, comes from cv::imread()
-   * \param[in] result The output keypoint detection result will be writen to
+   * \param[in] result The output keypoint detection result will be written to
   * this structure \return true if the prediction successed, otherwise false
   */
  virtual bool Predict(cv::Mat *img,

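A hedged call sketch for the Predict signature above (the `pipeline` object and the exact result type come from the surrounding header, not this hunk):

cv::Mat img = cv::imread("test.jpg");
ultra_infer::vision::KeyPointDetectionResult res;  // assumed result type
if (!pipeline.Predict(&img, &res)) {
  // prediction failed
}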
+ 1 - 1
libs/ultra-infer/ultra_infer/pybind/fd_tensor.cc

@@ -143,7 +143,7 @@ pybind11::capsule FDTensorToDLPack(FDTensor &fd_tensor) {
  pybind11::handle tensor_handle = pybind11::cast(&fd_tensor);

  // Increase the reference count by one to make sure that the DLPack
-  // represenation doesn't become invalid when the tensor object goes out of
+  // representation doesn't become invalid when the tensor object goes out of
  // scope.
  tensor_handle.inc_ref();

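The inc_ref() above pairs with a dec_ref() in the DLPack deleter; a hedged sketch of that release path (stashing the owner in manager_ctx is an assumption for illustration, not shown in this hunk):

// The capsule's deleter drops the reference taken by inc_ref() once the
// DLPack consumer is done with the tensor.
dl_managed_tensor->deleter = [](DLManagedTensor *self) {
  pybind11::handle owner(static_cast<PyObject *>(self->manager_ctx));
  owner.dec_ref();
};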
+ 1 - 1
libs/ultra-infer/ultra_infer/pybind/main.cc.in

@@ -156,7 +156,7 @@ cv::Mat PyArrayToCvMat(pybind11::array& pyarray) {

 PYBIND11_MODULE(@PY_LIBRARY_NAME@, m) {
   m.doc() =
-      "Make programer easier to deploy deeplearning model, save time to save "
+      "Make programmer easier to deploy deeplearning model, save time to save "
      "the world!";

  m.def("set_logger", &SetLogger);

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/backend.h

@@ -142,7 +142,7 @@ public:
 *
 * @endcode In this case, 'poros_outputs' inside a function
 * are wrapped by BEGIN and END, which may be required for
- * subsequent tasks. So, we set 'base_loop' as 0 and lanuch
+ * subsequent tasks. So, we set 'base_loop' as 0 and launch
 * another infer to get the valid outputs beyond the scope
 * of 'BEGIN ~ END' for subsequent tasks.
 */

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/lite/lite_backend.cc

@@ -123,7 +123,7 @@ bool LiteBackend::Init(const RuntimeOption &runtime_option) {
            config_);
    if (option_.optimized_model_dir != "") {
      FDINFO
-          << "Optimzed model dir is not empty, will save optimized model to: "
+          << "Optimized model dir is not empty, will save optimized model to: "
          << option_.optimized_model_dir << std::endl;
      predictor_->SaveOptimizedModel(
          option_.optimized_model_dir,

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/om/om_backend.cc

@@ -392,7 +392,7 @@ bool OmBackend::CreateInput() {

 bool OmBackend::CreateOutput() {
   if (modelDesc_ == nullptr) {
-    FDERROR << "no model description, create ouput failed";
+    FDERROR << "no model description, create output failed";
    return false;
  }

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/ort/ort_backend.cc

@@ -195,7 +195,7 @@ bool OrtBackend::InitFromPaddle(const std::string &model_buffer,
          true, verbose, true, true, true, ops.data(), 2, "onnxruntime",
          nullptr, 0, "", &save_external, option.enable_fp16,
          disable_fp16_ops.data(), option.ort_disabled_ops_.size())) {
-    FDERROR << "Error occured while export PaddlePaddle to ONNX format."
+    FDERROR << "Error occurred while export PaddlePaddle to ONNX format."
            << std::endl;
    return false;
  }

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/ort/ort_backend.h

@@ -78,7 +78,7 @@ private:
  // the ONNX model file name,
  // when ONNX is bigger than 2G, we will set this name
  std::string model_file_name;
-  // recored if the model has been converted to fp16
+  // recorded if the model has been converted to fp16
  bool converted_to_fp16 = false;

 #ifndef NON_64_PLATFORM

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/grid_sample_3d.cu

@@ -232,7 +232,7 @@ GridSample3DCudaKernel(const index_t nthreads, index_t out_c, index_t out_d,
      index_t iy_nearest = static_cast<index_t>(std::round(iy));
      index_t iz_nearest = static_cast<index_t>(std::round(iz));

-      // assign nearest neighor pixel value to output pixel
+      // assign nearest neighbor pixel value to output pixel
      auto inp_ptr_NC = input + n * inp_sN;
      auto out_ptr_NCDHW =
          output + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
@@ -583,7 +583,7 @@ __global__ void GridSample3DCudaBackwardKernel(
      auto iy_nearest = static_cast<index_t>(std::round(iy));
      auto iz_nearest = static_cast<index_t>(std::round(iz));

-      // assign nearest neighor pixel value to output pixel
+      // assign nearest neighbor pixel value to output pixel
      index_t gOut_offset =
          n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
      T *gInp_ptr_NC = grad_input + n * inp_sN;

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/iou3d_nms.cc

@@ -143,7 +143,7 @@ std::vector<paddle::Tensor> nms_gpu(const paddle::Tensor &boxes,
  cudaFree(mask_data);

  // WARN(qiuyanjun): codes below will throw a compile error on windows with
-  // msvc. Thus, we choosed to use std::vectored to store the result instead.
+  // msvc. Thus, we chose to use std::vectored to store the result instead.
  // unsigned long long remv_cpu[col_blocks];
  // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long));
  std::vector<unsigned long long> remv_cpu(col_blocks, 0);
@@ -210,7 +210,7 @@ int nms_normal_gpu(paddle::Tensor boxes, paddle::Tensor keep,
  cudaFree(mask_data);

  // WARN(qiuyanjun): codes below will throw a compile error on windows with
-  // msvc. Thus, we choosed to use std::vectored to store the result instead.
+  // msvc. Thus, we chose to use std::vectored to store the result instead.
  // unsigned long long remv_cpu[col_blocks];
  // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long));
  std::vector<unsigned long long> remv_cpu(col_blocks, 0);

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/iou3d_nms_kernel.cu

@@ -133,7 +133,7 @@ __device__ inline void rotate_around_center(const Point &center,
  // float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos +
  // center.y;
  // p.set(new_x, new_y);
-  // Aligh with the implement of mmdet3d
+  // Align with the implement of mmdet3d
  float new_x =
      (p.x - center.x) * angle_cos + (p.y - center.y) * angle_sin + center.x;
  float new_y =

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/paddle/option.h

@@ -108,7 +108,7 @@ struct PaddleBackendOption {
  bool collect_trt_shape = false;
  /// Collect shape for model by device (for some custom ops)
  bool collect_trt_shape_by_device = false;
-  /// Cache input shape for mkldnn while the input data will change dynamiclly
+  /// Cache input shape for mkldnn while the input data will change dynamically
  int mkldnn_cache_size = -1;
  /// initialize memory size(MB) for GPU
  int gpu_mem_init_size = 100;
@@ -165,7 +165,7 @@ struct PaddleBackendOption {
  std::string model_file = "";  // Path of model file
  std::string params_file = ""; // Path of parameters file, can be empty

-  // load model and paramters from memory
+  // load model and parameters from memory
  bool model_from_memory_ = false;
};
} // namespace ultra_infer

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/paddle/paddle_backend.cc

@@ -205,7 +205,7 @@ bool PaddleBackend::Init(const RuntimeOption &runtime_option) {
  option.paddle_infer_option.trt_option.gpu_id = runtime_option.device_id;
  // Note(qiuyanjun): For Ipu option and XPU option, please check the
  // details of RuntimeOption::UseIpu() and RuntimeOption::UseKunlunXin().
-  // Futhermore, please check paddle_infer_option.SetIpuConfig() and
+  // Furthermore, please check paddle_infer_option.SetIpuConfig() and
  // paddle_infer_option.SetXpuConfig() for more details of extra configs.
  return InitFromPaddle(option.model_file, option.params_file,
                        option.model_from_memory_, option.paddle_infer_option);

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/poros/common/compile.h

@@ -84,7 +84,7 @@ private:
                       std::shared_ptr<torch::jit::Graph> &graph);

  /**
-   * @brief segement this calculation graph
+   * @brief segment this calculation graph
   *
   * @param [in/out] graph
   * @return  int
@@ -136,7 +136,7 @@ private:
  IEngine *select_engine(const torch::jit::Node *n);

  /**
-   * @brief destory
+   * @brief destroy
   *
   * @return  void
   **/

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/rknpu2/option.h

@@ -25,8 +25,8 @@ typedef enum _rknpu2_cpu_name {
 * RKNN_NPU_CORE_AUTO : Referring to automatic mode, meaning that it will
 * select the idle core inside the NPU.
 * RKNN_NPU_CORE_0 : Running on the NPU0 core.
- * RKNN_NPU_CORE_1: Runing on the NPU1 core.
- * RKNN_NPU_CORE_2: Runing on the NPU2 core.
+ * RKNN_NPU_CORE_1: Running on the NPU1 core.
+ * RKNN_NPU_CORE_2: Running on the NPU2 core.
 * RKNN_NPU_CORE_0_1: Running on both NPU0 and NPU1 core simultaneously.
 * RKNN_NPU_CORE_0_1_2: Running on both NPU0, NPU1 and NPU2 simultaneously.
 */

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.cc

@@ -187,7 +187,7 @@ bool TrtBackend::InitFromPaddle(const std::string &model_buffer,
                           verbose, true, true, true, ops.data(), 1, "tensorrt",
                           &calibration_cache_ptr, &calibration_cache_size, "",
                           &save_external_)) {
-    FDERROR << "Error occured while export PaddlePaddle to ONNX format."
+    FDERROR << "Error occurred while export PaddlePaddle to ONNX format."
            << std::endl;
    return false;
  }
@@ -671,7 +671,7 @@ bool TrtBackend::BuildTrtEngine() {
    engine_file.close();
    FDINFO << "TensorRTEngine is serialized to local file "
           << option_.serialize_file
-           << ", we can load this model from the seralized engine "
+           << ", we can load this model from the serialized engine "
              "directly next time."
           << std::endl;
  }

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.h

@@ -123,7 +123,7 @@ private:
  // the output order of tensorrt may not be same
  // with the original onnx model
  // So this parameter will record to origin outputs
-  // order, to help recover the rigt order
+  // order, to help recover the right order
  std::map<std::string, int> outputs_order_;

  // temporary store onnx model content
@@ -131,7 +131,7 @@ private:
  // it will be released
  std::string onnx_model_buffer_;
  // Stores shape information of the loaded model
-  // For dynmaic shape will record its range information
+  // For dynamic shape will record its range information
  // Also will update the range information while inferencing
  std::map<std::string, ShapeRangeInfo> shape_range_info_;

+ 3 - 3
libs/ultra-infer/ultra_infer/runtime/runtime.cc

@@ -97,7 +97,7 @@ bool AutoSelectBackend(RuntimeOption &option) {
  }

  if (candidates.size() == 0) {
-    FDERROR << "Cannot found availabel inference backends by model format: "
+    FDERROR << "Cannot found available inference backends by model format: "
            << option.model_format << " with device: " << option.device
            << std::endl;
    return false;
@@ -112,7 +112,7 @@ bool AutoSelectBackend(RuntimeOption &option) {
    }
  }
  std::string debug_message = Str(candidates);
-  FDERROR << "The candiate backends for " << option.model_format << " & "
+  FDERROR << "The candidate backends for " << option.model_format << " & "
          << option.device << " are " << debug_message
          << ", but both of them have not been compiled with current "
             "UltraInfer yet."
@@ -428,7 +428,7 @@ bool Runtime::Compile(std::vector<std::vector<FDTensor>> &prewarm_tensors) {
  FDASSERT(
      casted_backend->Compile(option.model_file, prewarm_tensors,
                              option.poros_option),
-      "Load model from Torchscript failed while initliazing PorosBackend.");
+      "Load model from Torchscript failed while initializing PorosBackend.");
 #else
  FDASSERT(false, "PorosBackend is not available, please compiled with "
                  "ENABLE_POROS_BACKEND=ON.");

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/runtime.h

@@ -33,7 +33,7 @@ namespace ultra_infer {
 */
 struct ULTRAINFER_DECL Runtime {
 public:
-  /// Intialize a Runtime object with RuntimeOption
+  /// Initialize a Runtime object with RuntimeOption
  bool Init(const RuntimeOption &_option);

  /** \brief Inference the model by the input data, and write to the output
@@ -87,7 +87,7 @@ public:
  /** \brief Clone new Runtime when multiple instances of the same model are
   * created
   *
-   * \param[in] stream CUDA Stream, defualt param is nullptr
+   * \param[in] stream CUDA Stream, default param is nullptr
   * \return new Runtime* by this clone
   */
  Runtime *Clone(void *stream = nullptr, int device_id = -1);

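A hedged sketch of the Runtime lifecycle documented above (the SetModelPath helper and the file names are illustrative assumptions):

ultra_infer::RuntimeOption opt;
opt.SetModelPath("model.pdmodel", "model.pdiparams");  // assumed helper
ultra_infer::Runtime runtime;
if (!runtime.Init(opt)) { /* handle failure */ }
// A second instance sharing weights; nullptr keeps the default CUDA stream.
ultra_infer::Runtime *copy = runtime.Clone(/*stream=*/nullptr, /*device_id=*/-1);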
+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet.cc

@@ -82,7 +82,7 @@ bool ResNet::Postprocess(FDTensor &infer_result, ClassifyResult *result,
                         int topk) {
  // In this function, the postprocess need be implemented according to the
  // original Repos,
-  // Finally the reslut of postprocess should be saved in ClassifyResult
+  // Finally the result of postprocess should be saved in ClassifyResult
  // variable.
  // 1. Softmax 2. Choose topk labels 3. Put the result into ClassifyResult
  // variable.

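A hedged outline of the three steps the comment names, reusing the Softmax helper touched earlier in this commit (the ClassifyResult fields are assumed from context):

ultra_infer::FDTensor probs;
ultra_infer::function::Softmax(infer_result, &probs, /*axis=*/1);
// Then pick the top-k entries of `probs` and copy their indices and values
// into result->label_ids and result->scores.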
+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet_pybind.cc

@@ -17,7 +17,7 @@
 namespace ultra_infer {
 // the name of Pybind function should be Bind${model_name}
 void BindResNet(pybind11::module &m) {
-  // the constructor and the predict funtion are necessary
+  // the constructor and the predict function are necessary
  // the constructor is used to initialize the python model class.
  // the necessary public functions and variables like `size`, `mean_vals`
  // should also be binded.

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/yolov5cls.h

@@ -45,7 +45,7 @@ public:
   *
   * \param[in] img The input image data, comes from cv::imread(), is a 3-D
   * array with layout HWC, BGR format \param[in] result The output
-   * classification result will be writen to this structure \return true if the
+   * classification result will be written to this structure \return true if the
   * prediction successed, otherwise false
   */
  virtual bool Predict(const cv::Mat &img, ClassifyResult *result);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppcls/model.h

@@ -59,7 +59,7 @@ public:
   * remove at 1.0 version
   *
   * \param[in] im The input image data, comes from cv::imread()
-   * \param[in] result The output classification result will be writen to this
+   * \param[in] result The output classification result will be written to this
   * structure \return true if the prediction successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, ClassifyResult *result, int topk = 1);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.h

@@ -51,7 +51,7 @@ public:
   *     maybe it's better to run resize on CPU, because the HostToDevice memcpy
   *     is time consuming. Set this true to run the initial resize on CPU.
   *
-   * \param[in] v ture or false
+   * \param[in] v true or false
   */
  void InitialResizeOnCpu(bool v) { initial_resize_on_cpu_ = v; }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec.h

@@ -57,7 +57,7 @@ public:
   * remove at 1.0 version
   *
   * \param[in] im The input image data, comes from cv::imread()
-   * \param[in] result The output feature vector result will be writen to this
+   * \param[in] result The output feature vector result will be written to this
   * structure \return true if the prediction successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, ClassifyResult *result);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.h

@@ -51,7 +51,7 @@ public:
   *     maybe it's better to run resize on CPU, because the HostToDevice memcpy
   *     is time consuming. Set this true to run the initial resize on CPU.
   *
-   * \param[in] v ture or false
+   * \param[in] v true or false
   */
  void InitialResizeOnCpu(bool v) { initial_resize_on_cpu_ = v; }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/cast.h

@@ -24,7 +24,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for cast images with given type deafault is float.
+/*! @brief Processor for cast images with given type default is float.
 */
class ULTRAINFER_DECL Cast : public Processor {
public:

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/center_crop.h

@@ -24,7 +24,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for crop images in center with given type deafault is
+/*! @brief Processor for crop images in center with given type default is
 * float.
 */
class ULTRAINFER_DECL CenterCrop : public Processor {

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/common/processors/color_space_convert.h

@@ -19,7 +19,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for tansform images from BGR to RGB.
+/*! @brief Processor for transform images from BGR to RGB.
 */
class ULTRAINFER_DECL BGR2RGB : public Processor {
public:
@@ -38,7 +38,7 @@ public:
  static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
};

-/*! @brief Processor for tansform images from RGB to BGR.
+/*! @brief Processor for transform images from RGB to BGR.
 */
class ULTRAINFER_DECL RGB2BGR : public Processor {
public:
@@ -57,7 +57,7 @@ public:
  static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
};

-/*! @brief Processor for tansform images from BGR to GRAY.
+/*! @brief Processor for transform images from BGR to GRAY.
 */
class ULTRAINFER_DECL BGR2GRAY : public Processor {
public:
@@ -76,7 +76,7 @@ public:
  static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
};

-/*! @brief Processor for tansform images from RGB to GRAY.
+/*! @brief Processor for transform images from RGB to GRAY.
 */
class ULTRAINFER_DECL RGB2GRAY : public Processor {
public:

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/convert.h

@@ -18,7 +18,7 @@

 namespace ultra_infer {
 namespace vision {
-/*! @brief Processor for convert images with given paramters.
+/*! @brief Processor for convert images with given parameters.
 */
class ULTRAINFER_DECL Convert : public Processor {
public:

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/convert_and_permute.h

@@ -18,7 +18,7 @@

 namespace ultra_infer {
 namespace vision {
-/*! @brief Processor for convert images with given paramters and permute images
+/*! @brief Processor for convert images with given parameters and permute images
 * from HWC to CHW.
 */
class ULTRAINFER_DECL ConvertAndPermute : public Processor {

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/crop.h

@@ -19,7 +19,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for crop images with given paramters.
+/*! @brief Processor for crop images with given parameters.
 */
class ULTRAINFER_DECL Crop : public Processor {
public:

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/common/processors/limit_by_stride.h

@@ -19,7 +19,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for LimitByStride images with given paramters.
+/*! @brief Processor for LimitByStride images with given parameters.
 */
class ULTRAINFER_DECL LimitByStride : public Processor {
public:
@@ -38,8 +38,8 @@ public:
  /** \brief Process the input images
   *
   * \param[in] mat The input image data
-   * \param[in] stride limit image stride, deafult is 32
-   * \param[in] interp interpolation method, deafult is 1
+   * \param[in] stride limit image stride, default is 32
+   * \param[in] interp interpolation method, default is 1
   * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
   * \return true if the process successed, otherwise false
   */

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/common/processors/limit_short.h

@@ -19,7 +19,7 @@
 namespace ultra_infer {
 namespace vision {

-/*! @brief Processor for Limit images by short edge with given paramters.
+/*! @brief Processor for Limit images by short edge with given parameters.
 */
class LimitShort : public Processor {
public:
@@ -45,7 +45,7 @@ public:
   * \param[in] mat The input image data
   * \param[in] max_short target size of short edge
   * \param[in] min_short target size of short edge
-   * \param[in] interp interpolation method, deafult is 1
+   * \param[in] interp interpolation method, default is 1
   * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
   * \return true if the process successed, otherwise false
   */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/manager.h

@@ -30,7 +30,7 @@ public:

  /** \brief Use CUDA to boost the performance of processors
   *
-   * \param[in] enable_cv_cuda ture: use CV-CUDA, false: use CUDA only
+   * \param[in] enable_cv_cuda true: use CV-CUDA, false: use CUDA only
   * \param[in] gpu_id GPU device id
   * \return true if the preprocess successed, otherwise false
   */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/mat_batch.h

@@ -28,7 +28,7 @@ enum FDMatBatchLayout { NHWC, NCHW };
 struct ULTRAINFER_DECL FDMatBatch {
   FDMatBatch() = default;
 
-  // MatBatch is intialized with a list of mats,
+  // MatBatch is initialized with a list of mats,
  // the data is stored in the mats separately.
  // Call Tensor() function to get a batched 4-dimension tensor.
  explicit FDMatBatch(std::vector<FDMat> *_mats) {

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/common/processors/normalize.h

@@ -18,7 +18,7 @@

 namespace ultra_infer {
 namespace vision {
-/*! @brief Processor for Normalize images with given paramters.
+/*! @brief Processor for Normalize images with given parameters.
 */
class ULTRAINFER_DECL Normalize : public Processor {
public:
@@ -48,7 +48,7 @@ public:
  // auto norm = Normalize(...)
  // norm(mat)
  // ```
-  // There will be some precomputation in contruct function
+  // There will be some precomputation in construct function
  // and the `norm(mat)` only need to compute result = mat * alpha + beta
  // which will reduce lots of time
  /** \brief Process the input images

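A worked example of the precomputation the comment describes (generic arithmetic, not the exact member names of Normalize): for result = (pixel / 255 - mean) / std, folding the constants once gives alpha = 1 / (255 * std) and beta = -mean / std, so every later call is a single multiply-add. With ImageNet's red-channel statistics mean = 0.485 and std = 0.229:

// alpha = 1 / (255 * 0.229) ≈ 0.017125
// beta  = -0.485 / 0.229    ≈ -2.1179
// result = pixel * 0.017125 - 2.1179  ==  (pixel / 255 - 0.485) / 0.229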
+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/normalize_and_permute.h

@@ -48,7 +48,7 @@ public:
  // auto norm = Normalize(...)
  // norm(mat)
  // ```
-  // There will be some precomputation in contruct function
+  // There will be some precomputation in construct function
  // and the `norm(mat)` only need to compute result = mat * alpha + beta
  // which will reduce lots of time
  /** \brief Process the input images

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/proc_lib.cc

@@ -37,7 +37,7 @@ std::ostream &operator<<(std::ostream &out, const ProcLib &p) {
    out << "ProcLib::CVCUDA";
    break;
  default:
-    FDASSERT(false, "Unknow type of ProcLib.");
+    FDASSERT(false, "Unknown type of ProcLib.");
  }
  return out;
}

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/common/processors/resize.h

@@ -52,10 +52,10 @@ public:
   * \param[in] mat The input image data, `result = mat * alpha + beta`
   * \param[in] width width of the output image.
   * \param[in] height height of the output image.
-   * \param[in] scale_w scale of width, deafult is -1.0.
-   * \param[in] scale_h scale of height, deafult is -1.0.
-   * \param[in] interp interpolation method, deafult is 1.
-   * \param[in] use_scale to define wheather to scale the image, deafult is
+   * \param[in] scale_w scale of width, default is -1.0.
+   * \param[in] scale_h scale of height, default is -1.0.
+   * \param[in] interp interpolation method, default is 1.
+   * \param[in] use_scale to define whether to scale the image, default is
   * true. \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
   * \return true if the process successed, otherwise false
   */

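A hedged call sketch matching the parameter list documented above (FDMat creation is outside this diff; the trailing ProcLib argument keeps its default):

ultra_infer::vision::Resize::Run(&mat, /*width=*/640, /*height=*/640,
                                 /*scale_w=*/-1.0f, /*scale_h=*/-1.0f,
                                 /*interp=*/1, /*use_scale=*/true);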
+ 2 - 2
libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h

@@ -49,8 +49,8 @@ public:
   *
   * \param[in] mat The input image data, `result = mat * alpha + beta`
   * \param[in] target_size target size of the output image.
-   * \param[in] interp interpolation method, deafult is 1.
-   * \param[in] use_scale to define wheather to scale the image, deafult is
+   * \param[in] interp interpolation method, default is 1.
+   * \param[in] use_scale to define whether to scale the image, default is
   * true. \param[in] max_hw max HW fo output image. \param[in] lib to define
   * OpenCV or FlyCV or CVCUDA will be used. \return true if the process
   * successed, otherwise false

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/common/result.h

@@ -126,7 +126,7 @@ struct ULTRAINFER_DECL DetectionResult : public BaseResult {
  /// The classify label for all the detected objects
  std::vector<int32_t> label_ids;
  /** \brief For instance segmentation model, `masks` is the predict mask for
-   * all the deteced objects
+   * all the detected objects
   */
  std::vector<Mask> masks;
  /// Shows if the DetectionResult has mask
@@ -435,7 +435,7 @@ struct ULTRAINFER_DECL MattingResult : public BaseResult {
  std::vector<float> alpha; // h x w
  /** \brief
  If the model can predict foreground, `foreground` save the predicted
-  foreground image, the shape is [hight,width,channel] generally.
+  foreground image, the shape is [height,width,channel] generally.
  */
  std::vector<float> foreground; // h x w x c (c=3 default)
  /** \brief

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/fastestdet.h

@@ -45,7 +45,7 @@ public:
   *
   * \param[in] img The input image data, comes from cv::imread(), is a 3-D
   * array with layout HWC, BGR format \param[in] result The output detection
-   * result will be writen to this structure \return true if the prediction
+   * result will be written to this structure \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(const cv::Mat &img, DetectionResult *result);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.cc

@@ -49,14 +49,14 @@ void WrapAndResize(Mat *mat, std::vector<int> size, std::vector<float> color,
                   bool keep_ratio = false) {
  // Reference: nanodet/data/transform/warp.py#L139
  // size: tuple of input (width, height)
-  // The default value of `keep_ratio` is `fasle` in
+  // The default value of `keep_ratio` is `false` in
  // `config/nanodet-plus-m-1.5x_320.yml` for both
  // train and val processes. So, we just let this
  // option default `false` according to the official
  // implementation in NanoDet and NanoDet-Plus.
  // Note, this function will apply a normal resize
  // operation to input Mat if the keep_ratio option
-  // is fasle and the behavior will be the same as
+  // is false and the behavior will be the same as
  // yolov5's letterbox if keep_ratio is true.

  // with keep_ratio = false (default)

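The two behaviors the comment contrasts, as a short illustrative sketch (names are local to this example, not taken from the file):

// keep_ratio == false: plain resize to (out_w, out_h); aspect ratio may change.
// keep_ratio == true: yolov5-style letterbox, i.e. uniform scale plus padding:
float r = std::min(out_w / static_cast<float>(img_w),
                   out_h / static_cast<float>(img_h));
int new_w = static_cast<int>(std::round(img_w * r));
int new_h = static_cast<int>(std::round(img_h * r));
int pad_w = out_w - new_w;  // filled with `color`
int pad_h = out_h - new_h;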
+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h

@@ -48,9 +48,9 @@ public:
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
   * with layout HWC, BGR format \param[in] result The output detection result
-   * will be writen to this structure \param[in] conf_threshold confidence
+   * will be written to this structure \param[in] conf_threshold confidence
   * threashold for postprocessing, default is 0.35 \param[in] nms_iou_threshold
-   * iou threashold for NMS, default is 0.5 \return true if the prediction
+   * iou threshold for NMS, default is 0.5 \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, DetectionResult *result,
@@ -70,7 +70,7 @@ public:
  // downsample strides for NanoDet-Plus to generate anchors,
  // will take (8, 16, 32, 64) as default values
  std::vector<int> downsample_strides;
-  // for offseting the boxes by classes when using NMS, default 4096
+  // for offsetting the boxes by classes when using NMS, default 4096
  float max_wh;
  /*! @brief
  Argument for image postprocessing step, reg_max for GFL regression, default 7

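The max_wh field above is the usual class-offset trick for running one class-agnostic NMS pass; an illustrative sketch (not the file's actual postprocess code):

// Shift each box by label_id * max_wh so boxes of different classes can
// never overlap, then run plain NMS once over all of them.
float offset = label_id * max_wh;  // max_wh (default 4096) exceeds any image side
box.xmin += offset;  box.ymin += offset;
box.xmax += offset;  box.ymax += offset;
// ... NMS ..., then subtract `offset` to restore the coordinates.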
+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/preprocessor.h

@@ -73,7 +73,7 @@ protected:
  // padding value, size should be the same as channels
  std::vector<float> padding_value_;
 
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
  bool is_mini_pad_;

  // while is_mini_pad = false and is_no_pad = true,
@@ -87,7 +87,7 @@ protected:
  // padding stride, for is_mini_pad
  int stride_;
 
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
  float max_wh_;

  std::vector<std::vector<int>> pad_hw_values_;

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/rkyolo.h

@@ -34,7 +34,7 @@ public:
   *
   * \param[in] img The input image data, comes from cv::imread(), is a 3-D
   * array with layout HWC, BGR format \param[in] result The output detection
-   * result will be writen to this structure \return true if the prediction
+   * result will be written to this structure \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(const cv::Mat &img, DetectionResult *result);

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h

@@ -45,9 +45,9 @@ public:
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
   * with layout HWC, BGR format \param[in] result The output detection result
-   * will be writen to this structure \param[in] conf_threshold confidence
+   * will be written to this structure \param[in] conf_threshold confidence
   * threashold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
-   * iou threashold for NMS, default is 0.5 \return true if the prediction
+   * iou threshold for NMS, default is 0.5 \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, DetectionResult *result,
@@ -61,7 +61,7 @@ public:
  std::vector<int> size;
  // padding value, size should be the same as channels
  std::vector<float> padding_value;
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
  bool is_mini_pad;
  // while is_mini_pad = false and is_no_pad = true,
  // will resize the image to the set size
@@ -71,7 +71,7 @@ public:
  bool is_scale_up;
  // padding stride, for is_mini_pad
  int stride;
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
  float max_wh;

private:

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h

@@ -42,7 +42,7 @@ public:
  /** \brief Predict the detection result for an input image
   *
   * \param[in] im The input image data, comes from cv::imread()
-   * \param[in] result The output detection result will be writen to this
+   * \param[in] result The output detection result will be written to this
   * structure \param[in] conf_threshold confidence threashold for
   * postprocessing, default is 0.25 \param[in] nms_iou_threshold iou threashold
   * for NMS, default is 0.5 \return true if the prediction successed, otherwise
@@ -60,7 +60,7 @@ public:
  // padding value, size should be the same as channels

  std::vector<float> padding_value;
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
  bool is_mini_pad;
  // while is_mini_pad = false and is_no_pad = true,
  // will resize the image to the set size
@@ -70,7 +70,7 @@ public:
  bool is_scale_up;
  // padding stride, for is_mini_pad
  int stride;
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
  float max_wh;

private:

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/preprocessor.h

@@ -59,7 +59,7 @@ public:
  /// Get is_scale_up, default true
  bool GetScaleUp() const { return is_scale_up_; }
 
-  /// Set is_mini_pad, pad to the minimum rectange
+  /// Set is_mini_pad, pad to the minimum rectangle
  /// which height and width is times of stride
  void SetMiniPad(bool is_mini_pad) { is_mini_pad_ = is_mini_pad; }
 
@@ -84,7 +84,7 @@ protected:
  // padding value, size should be the same as channels
  std::vector<float> padding_value_;
 
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
  bool is_mini_pad_;

  // while is_mini_pad = false and is_no_pad = true,
@@ -98,7 +98,7 @@ protected:
  // padding stride, for is_mini_pad
  int stride_;
 
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
  float max_wh_;
};

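A worked example of is_mini_pad_ (illustrative numbers): with stride_ = 32 and letterboxed content of 640x427, the image is padded only up to the next stride multiple rather than the full square:

// ceil(427 / 32) * 32 = 14 * 32 = 448
// -> padded size 640x448 instead of 640x640, trimming compute spent on
//    pure padding rows.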
+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h

@@ -46,9 +46,9 @@ public:
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
   * with layout HWC, BGR format \param[in] result The output detection result
-   * will be writen to this structure \param[in] conf_threshold confidence
+   * will be written to this structure \param[in] conf_threshold confidence
   * threashold for postprocessing, default is 0.25 \param[in] nms_threshold iou
-   * threashold for NMS, default is 0.5 \return true if the prediction
+   * threshold for NMS, default is 0.5 \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, DetectionResult *result,
@@ -58,7 +58,7 @@ public:
   *
   * \param[in] img The input image data, comes from cv::imread(), is a 3-D
   * array with layout HWC, BGR format \param[in] result The output detection
-   * result will be writen to this structure \return true if the prediction
+   * result will be written to this structure \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(const cv::Mat &img, DetectionResult *result);

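A hedged sketch of the two Predict overloads above (the constructor arguments and file names are illustrative; the model class lives in the surrounding header):

ultra_infer::vision::detection::YOLOv5 model("yolov5s.onnx");
cv::Mat im = cv::imread("test.jpg");
ultra_infer::vision::DetectionResult res;
model.Predict(im, &res);                      // overload without thresholds
model.Predict(&im, &res, /*conf_threshold=*/0.25f,
              /*nms_threshold=*/0.5f);        // overload with thresholds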
+ 7 - 7
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h

@@ -45,9 +45,9 @@ public:
   *
   * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
   * with layout HWC, BGR format \param[in] result The output detection result
-   * will be writen to this structure \param[in] conf_threshold confidence
+   * will be written to this structure \param[in] conf_threshold confidence
   * threashold for postprocessing, default is 0.45 \param[in] nms_iou_threshold
-   * iou threashold for NMS, default is 0.25 \return true if the prediction
+   * iou threshold for NMS, default is 0.25 \return true if the prediction
   * successed, otherwise false
   */
  virtual bool Predict(cv::Mat *im, DetectionResult *result,
@@ -64,7 +64,7 @@ public:
   // padding value, size should be the same as channels
   // padding value, size should be the same as channels
 
 
   std::vector<float> padding_value;
   std::vector<float> padding_value;
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
   bool is_mini_pad;
   bool is_mini_pad;
   // while is_mini_pad = false and is_no_pad = true,
   // while is_mini_pad = false and is_no_pad = true,
   // will resize the image to the set size
   // will resize the image to the set size
@@ -74,13 +74,13 @@ public:
   bool is_scale_up;
   bool is_scale_up;
   // padding stride, for is_mini_pad
   // padding stride, for is_mini_pad
   int stride;
   int stride;
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
   float max_wh;
   float max_wh;
   // downsample strides for YOLOv5Lite to generate anchors,
   // downsample strides for YOLOv5Lite to generate anchors,
   // will take (8,16,32) as default values, might have stride=64.
   // will take (8,16,32) as default values, might have stride=64.
   std::vector<int> downsample_strides;
   std::vector<int> downsample_strides;
   // anchors parameters, downsample_strides will take (8,16,32),
   // anchors parameters, downsample_strides will take (8,16,32),
-  // each stride has three anchors with width and hight
+  // each stride has three anchors with width and height
   std::vector<std::vector<float>> anchor_config;
   std::vector<std::vector<float>> anchor_config;
   /*! @brief
   /*! @brief
     whether the model_file was exported with decode module. The official
     whether the model_file was exported with decode module. The official
@@ -117,7 +117,7 @@ private:
 
 
   // the official YOLOv5Lite/export.py will export ONNX file without decode
   // the official YOLOv5Lite/export.py will export ONNX file without decode
   // module.
   // module.
-  // this fuction support the postporocess for ONNX file without decode module.
+  // this function support the postporocess for ONNX file without decode module.
   // set the `is_decode_exported = false`, this function will work.
   // set the `is_decode_exported = false`, this function will work.
   bool PostprocessWithDecode(
   bool PostprocessWithDecode(
       FDTensor &infer_result, DetectionResult *result,
       FDTensor &infer_result, DetectionResult *result,
@@ -129,7 +129,7 @@ private:
                  bool scale_fill = false, bool scale_up = true,
                  bool scale_fill = false, bool scale_up = true,
                  int stride = 32);
                  int stride = 32);
 
 
-  // generate anchors for decodeing when ONNX file without decode module.
+  // generate anchors for decoding when ONNX file without decode module.
   void GenerateAnchors(const std::vector<int> &size,
   void GenerateAnchors(const std::vector<int> &size,
                        const std::vector<int> &downsample_strides,
                        const std::vector<int> &downsample_strides,
                        std::vector<Anchor> *anchors, const int num_anchors = 3);
                        std::vector<Anchor> *anchors, const int num_anchors = 3);
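
The GenerateAnchors declaration above supplies anchors for decoding raw outputs when the ONNX export lacks the decode module. A hedged sketch of what such anchor generation typically looks like for (8,16,32) strides with three anchors per grid cell; the Anchor layout here is assumed for illustration, not taken from the library:

#include <vector>

// Assumed anchor layout; the library's Anchor type may differ.
struct Anchor {
  int grid_x, grid_y;  // cell coordinates on the feature map
  int stride;          // downsample stride of this feature level
  float w, h;          // anchor width/height from anchor_config, in pixels
};

// Sketch: for each feature level, emit num_anchors anchors per grid cell.
// `size` is the network input (width, height); anchor_config holds
// num_anchors (w, h) pairs per stride, as described in the header above.
void GenerateAnchorsSketch(const std::vector<int> &size,
                           const std::vector<int> &downsample_strides,
                           const std::vector<std::vector<float>> &anchor_config,
                           std::vector<Anchor> *anchors, int num_anchors = 3) {
  for (size_t level = 0; level < downsample_strides.size(); ++level) {
    int stride = downsample_strides[level];
    int grid_w = size[0] / stride, grid_h = size[1] / stride;
    for (int y = 0; y < grid_h; ++y)
      for (int x = 0; x < grid_w; ++x)
        for (int k = 0; k < num_anchors; ++k)
          anchors->push_back({x, y, stride, anchor_config[level][2 * k],
                              anchor_config[level][2 * k + 1]});
  }
}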

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/preprocessor.h

@@ -59,7 +59,7 @@ public:
   /// Get is_scale_up, default true
   bool GetScaleUp() const { return is_scale_up_; }
 
-  /// Set is_mini_pad, pad to the minimum rectange
+  /// Set is_mini_pad, pad to the minimum rectangle
   /// which height and width is times of stride
   void SetMiniPad(bool is_mini_pad) { is_mini_pad_ = is_mini_pad; }
 
@@ -84,7 +84,7 @@ protected:
   // padding value, size should be the same as channels
   std::vector<float> padding_value_;
 
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
   bool is_mini_pad_;
 
   // while is_mini_pad = false and is_no_pad = true,
@@ -98,7 +98,7 @@ protected:
   // padding stride, for is_mini_pad
   int stride_;
 
-  // for offseting the boxes by classes when using NMS
+  // for offsetting the boxes by classes when using NMS
   float max_wh_;
 };
 
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/yolov5seg.h

@@ -45,7 +45,7 @@ public:
    *
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
-   * result will be writen to this structure \return true if the prediction
+   * result will be written to this structure \return true if the prediction
    * successed, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h

@@ -48,9 +48,9 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
-   * will be writen to this structure \param[in] conf_threshold confidence
+   * will be written to this structure \param[in] conf_threshold confidence
    * threashold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
-   * iou threashold for NMS, default is 0.5 \return true if the prediction
+   * iou threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
@@ -67,7 +67,7 @@ public:
   // padding value, size should be the same as channels
 
   std::vector<float> padding_value;
-  // only pad to the minimum rectange which height and width is times of stride
+  // only pad to the minimum rectangle which height and width is times of stride
   bool is_mini_pad;
   // while is_mini_pad = false and is_no_pad = true,
   // will resize the image to the set size
@@ -77,7 +77,7 @@ public:
   bool is_scale_up;
   // padding stride, for is_mini_pad
   int stride;
-  // for offseting the boxes by classes when using NMS,
+  // for offsetting the boxes by classes when using NMS,
   // default 4096 in meituan/YOLOv6
   float max_wh;
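
The max_wh trick documented in these headers (default 4096 in meituan/YOLOv6) makes a single class-agnostic NMS pass behave class-aware: each box is shifted by class_id * max_wh, so boxes of different classes land in disjoint coordinate regions and can never suppress one another. A minimal sketch of the idea, with an illustrative Box struct rather than the library's types:

#include <vector>

struct Box { float x1, y1, x2, y2; int class_id; };

// Shift every box by class_id * max_wh before NMS; max_wh only needs to
// exceed any real coordinate (e.g. 4096 for inputs up to 4096 px), so
// same-class boxes still overlap normally while cross-class boxes cannot.
static std::vector<Box> OffsetByClass(std::vector<Box> boxes, float max_wh) {
  for (auto &b : boxes) {
    float off = b.class_id * max_wh;
    b.x1 += off; b.y1 += off; b.x2 += off; b.y2 += off;
  }
  return boxes;  // run class-agnostic NMS on these, then undo the offset
}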
 
 

Some files were not shown because too many files changed in this diff