
Fix typos: successed -> succeeded (#3988)

co63oc committed 6 months ago
parent
commit
3043d23376
100 changed files with 152 additions and 152 deletions
  1. +1 -1  libs/ultra-infer/python/ultra_infer/download.py
  2. +1 -1  libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h
  3. +2 -2  libs/ultra-infer/ultra_infer/runtime/runtime.h
  4. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/postprocessor.h
  5. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/preprocessor.h
  6. +2 -2  libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/yolov5cls.h
  7. +5 -5  libs/ultra-infer/ultra_infer/vision/classification/ppcls/model.h
  8. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppcls/postprocessor.h
  9. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.h
  10. +5 -5  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec.h
  11. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_postprocessor.h
  12. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.h
  13. +3 -3  libs/ultra-infer/ultra_infer/vision/common/processors/base.h
  14. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/cast.h
  15. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/center_crop.h
  16. +4 -4  libs/ultra-infer/ultra_infer/vision/common/processors/color_space_convert.h
  17. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/convert.h
  18. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/convert_and_permute.h
  19. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/crop.h
  20. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/hwc2chw.h
  21. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/limit_by_stride.h
  22. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/limit_short.h
  23. +3 -3  libs/ultra-infer/ultra_infer/vision/common/processors/manager.h
  24. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/normalize.h
  25. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/normalize_and_permute.h
  26. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/pad.h
  27. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/pad_to_size.h
  28. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/resize.h
  29. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h
  30. +1 -1  libs/ultra-infer/ultra_infer/vision/common/processors/stride_pad.h
  31. +2 -2  libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/fastestdet.h
  32. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/postprocessor.h
  33. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/preprocessor.h
  34. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h
  35. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/postprocessor.h
  36. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/preprocessor.h
  37. +2 -2  libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/rkyolo.h
  38. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h
  39. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h
  40. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/postprocessor.h
  41. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/preprocessor.h
  42. +3 -3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h
  43. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h
  44. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/postprocessor.h
  45. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/preprocessor.h
  46. +2 -2  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/yolov5seg.h
  47. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h
  48. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/postprocessor.h
  49. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/preprocessor.h
  50. +3 -3  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/yolov7.h
  51. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7end2end_ort.h
  52. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7end2end_trt.h
  53. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/postprocessor.h
  54. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/preprocessor.h
  55. +2 -2  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/yolov8.h
  56. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolox.h
  57. +3 -3  libs/ultra-infer/ultra_infer/vision/detection/ppdet/base.h
  58. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/ppdet/postprocessor.h
  59. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/ppdet/preprocessor.h
  60. +1 -1  libs/ultra-infer/ultra_infer/vision/facealign/contrib/face_landmark_1000.h
  61. +1 -1  libs/ultra-infer/ultra_infer/vision/facealign/contrib/pfld.h
  62. +1 -1  libs/ultra-infer/ultra_infer/vision/facealign/contrib/pipnet.h
  63. +2 -2  libs/ultra-infer/ultra_infer/vision/facedet/contrib/centerface/centerface.h
  64. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/centerface/postprocessor.h
  65. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/retinaface.h
  66. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/scrfd.h
  67. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/ultraface.h
  68. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov5face.h
  69. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov7face/postprocessor.h
  70. +2 -2  libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov7face/yolov7face.h
  71. +2 -2  libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/blazeface.h
  72. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/postprocessor.h
  73. +2 -2  libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/adaface.h
  74. +1 -1  libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/postprocessor.h
  75. +1 -1  libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/preprocessor.h
  76. +2 -2  libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/base.h
  77. +1 -1  libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/postprocessor.h
  78. +1 -1  libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/preprocessor.h
  79. +2 -2  libs/ultra-infer/ultra_infer/vision/generation/contrib/animegan.h
  80. +1 -1  libs/ultra-infer/ultra_infer/vision/generation/contrib/postprocessor.h
  81. +1 -1  libs/ultra-infer/ultra_infer/vision/generation/contrib/preprocessor.h
  82. +1 -1  libs/ultra-infer/ultra_infer/vision/headpose/contrib/fsanet.h
  83. +2 -2  libs/ultra-infer/ultra_infer/vision/keypointdet/pptinypose/pptinypose.h
  84. +1 -1  libs/ultra-infer/ultra_infer/vision/matting/contrib/modnet.h
  85. +1 -1  libs/ultra-infer/ultra_infer/vision/matting/contrib/rvm.h
  86. +1 -1  libs/ultra-infer/ultra_infer/vision/matting/ppmatting/ppmatting.h
  87. +4 -4  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/classifier.h
  88. +1 -1  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/cls_postprocessor.h
  89. +2 -2  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/cls_preprocessor.h
  90. +4 -4  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/dbcurvedetector.h
  91. +4 -4  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/dbdetector.h
  92. +1 -1  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_postprocessor.h
  93. +1 -1  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_postprocessor_curve.h
  94. +1 -1  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_preprocessor.h
  95. +2 -2  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/ppocr_v2.h
  96. +2 -2  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/ppstructurev2_table.h
  97. +1 -1  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/rec_postprocessor.h
  98. +2 -2  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/rec_preprocessor.h
  99. +4 -4  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/recognizer.h
  100. +3 -3  libs/ultra-infer/ultra_infer/vision/ocr/ppocr/structurev2_layout.h

+ 1 - 1
libs/ultra-infer/python/ultra_infer/download.py

@@ -135,7 +135,7 @@ def decompress(fname):
 
     # For protecting decompressing interrupted,
     # decompress to fpath_tmp directory firstly, if decompress
-    # successed, move decompress files to fpath and delete
+    # succeeded, move decompress files to fpath and delete
     # fpath_tmp and remove download compress file.
     fpath = osp.split(fname)[0]
     fpath_tmp = osp.join(fpath, "tmp")
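
The comment fixed above documents a protection pattern: extract into a temporary directory first, and only move files into their final location once extraction has finished, so an interrupted run never leaves a half-decompressed result behind. A minimal C++17 sketch of the same pattern (not taken from download.py; extract_archive and all names here are hypothetical placeholders):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

// Hypothetical extraction step; a real implementation would call an archive
// library here. Returns true if extraction succeeded.
static bool extract_archive(const fs::path &archive, const fs::path &dest) {
  (void)archive;
  fs::create_directories(dest);
  return true;
}

// Extract into "<dir>/tmp" first; only after extraction succeeded, move the
// files next to the archive, then delete the tmp directory and the archive.
static bool safe_decompress(const fs::path &archive) {
  const fs::path dir = archive.parent_path();
  const fs::path tmp = dir / "tmp";
  if (!extract_archive(archive, tmp)) {
    fs::remove_all(tmp);
    return false;
  }
  for (const auto &entry : fs::directory_iterator(tmp)) {
    fs::rename(entry.path(), dir / entry.path().filename());
  }
  fs::remove_all(tmp);
  fs::remove(archive);
  return true;
}

int main() {
  std::cout << (safe_decompress("model.tgz") ? "ok" : "failed") << "\n";
}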

+ 1 - 1
libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h

@@ -43,7 +43,7 @@ public:
    *
    * \param[in] img The input image data, comes from cv::imread()
    * \param[in] result The output keypoint detection result will be written to
-   * this structure \return true if the prediction successed, otherwise false
+   * this structure \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *img,
                        ultra_infer::vision::KeyPointDetectionResult *result);
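
The Predict signature above returns a bool and writes the keypoints into the result structure. A minimal call sketch, assuming the pipeline class declared in pipeline.h is named ultra_infer::pipeline::PPTinyPose (its name and construction are not part of this diff):

// Headers from ultra_infer and OpenCV omitted; usage sketch only.
void PoseSketch(ultra_infer::pipeline::PPTinyPose &pipeline) {
  cv::Mat img = cv::imread("person.jpg");  // input comes from cv::imread(), as documented
  ultra_infer::vision::KeyPointDetectionResult result;
  if (pipeline.Predict(&img, &result)) {
    // true means the prediction succeeded; `result` holds the keypoints
  }
}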

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/runtime.h

@@ -40,7 +40,7 @@ public:
    *
    * \param[in] input_tensors Notice the FDTensor::name should keep same with
    * the model's input \param[in] output_tensors Inference results \return true
-   * if the inference successed, otherwise false
+   * if the inference succeeded, otherwise false
    */
   bool Infer(std::vector<FDTensor> &input_tensors,
              std::vector<FDTensor> *output_tensors);
@@ -99,7 +99,7 @@ public:
   /** \brief Compile TorchScript Module, only for Poros backend
    *
    * \param[in] prewarm_tensors Prewarm datas for compile
-   * \return true if compile successed, otherwise false
+   * \return true if compile succeeded, otherwise false
    */
   bool Compile(std::vector<std::vector<FDTensor>> &prewarm_tensors);
   /** \brief Get profile time of Runtime after the profile process is done.
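
The two hunks above document Runtime::Infer and Runtime::Compile. A usage sketch for Infer, assuming the class is ultra_infer::Runtime, that it was already initialized elsewhere, and that FDTensor exposes a public name member as the comment implies (none of that is shown in this diff):

// Headers from ultra_infer omitted; usage sketch only.
bool RunOnce(ultra_infer::Runtime &runtime,
             std::vector<ultra_infer::FDTensor> &inputs) {
  // Each input tensor's FDTensor::name must match the model's input name,
  // and its data is assumed to have been filled by a preprocessor already.
  std::vector<ultra_infer::FDTensor> outputs;
  if (!runtime.Infer(inputs, &outputs)) {  // true means the inference succeeded
    return false;
  }
  return !outputs.empty();
}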

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of classification
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/classification/contrib/yolov5cls/yolov5cls.h

@@ -46,7 +46,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output
    * classification result will be written to this structure \return true if the
-   * prediction successed, otherwise false
+   * prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, ClassifyResult *result);
 
@@ -54,7 +54,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output classification result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<ClassifyResult> *results);
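
The hunks above document single-image Predict and BatchPredict for the classifier in yolov5cls.h. A call sketch, assuming the class is named ultra_infer::vision::classification::YOLOv5Cls and that ClassifyResult lives in ultra_infer::vision (neither name is visible in this diff):

// Headers from ultra_infer and OpenCV omitted; usage sketch only.
void ClassifySketch(ultra_infer::vision::classification::YOLOv5Cls &model) {
  cv::Mat img = cv::imread("test.jpg");  // 3-D HWC, BGR, as documented
  ultra_infer::vision::ClassifyResult result;
  if (model.Predict(img, &result)) {
    // prediction succeeded; `result` now holds the classification output
  }

  std::vector<cv::Mat> imgs = {img, img};
  std::vector<ultra_infer::vision::ClassifyResult> results;
  model.BatchPredict(imgs, &results);  // one result per input image
}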

+ 5 - 5
libs/ultra-infer/ultra_infer/vision/classification/ppcls/model.h

@@ -60,7 +60,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread()
    * \param[in] result The output classification result will be written to this
-   * structure \return true if the prediction successed, otherwise false
+   * structure \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, ClassifyResult *result, int topk = 1);
 
@@ -68,7 +68,7 @@ public:
    *
    * \param[in] img The input image data, comes from cv::imread()
    * \param[in] result The output classification result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, ClassifyResult *result);
 
@@ -76,7 +76,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output classification result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<ClassifyResult> *results);
@@ -85,7 +85,7 @@ public:
    *
    * \param[in] mat The input mat
    * \param[in] result The output classification result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const FDMat &mat, ClassifyResult *result);
 
@@ -93,7 +93,7 @@ public:
    *
    * \param[in] mats, The input mat list
    * \param[in] results The output classification result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<FDMat> &mats,
                             std::vector<ClassifyResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppcls/postprocessor.h

@@ -35,7 +35,7 @@ public:
    *
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of classification
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<ClassifyResult> *result);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.h

@@ -38,7 +38,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
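
The ppcls preprocessor exposes Apply(FDMatBatch*, std::vector<FDTensor>*) and the postprocessor exposes Run(const std::vector<FDTensor>&, std::vector<ClassifyResult>*), with the actual inference sitting between them. A chaining sketch; the class names (PaddleClasPreprocessor, PaddleClasPostprocessor) and the FDMatBatch namespace are assumptions not shown in these hunks:

// Headers from ultra_infer omitted; usage sketch only.
bool ClassifyBatch(ultra_infer::vision::classification::PaddleClasPreprocessor &pre,
                   ultra_infer::vision::classification::PaddleClasPostprocessor &post,
                   ultra_infer::Runtime &runtime,
                   ultra_infer::vision::FDMatBatch *image_batch) {
  std::vector<ultra_infer::FDTensor> input_tensors, output_tensors;
  if (!pre.Apply(image_batch, &input_tensors)) return false;          // preprocess
  if (!runtime.Infer(input_tensors, &output_tensors)) return false;   // inference
  std::vector<ultra_infer::vision::ClassifyResult> results;
  return post.Run(output_tensors, &results);                          // postprocess
}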
 

+ 5 - 5
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec.h

@@ -58,7 +58,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread()
    * \param[in] result The output feature vector result will be written to this
-   * structure \return true if the prediction successed, otherwise false
+   * structure \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, ClassifyResult *result);
 
@@ -66,7 +66,7 @@ public:
    *
    * \param[in] img The input image data, comes from cv::imread()
    * \param[in] result The output feature vector result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, ClassifyResult *result);
 
@@ -74,7 +74,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output feature vector(namely ClassifyResult.feature)
-   * result list \return true if the prediction successed, otherwise false
+   * result list \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<ClassifyResult> *results);
@@ -83,7 +83,7 @@ public:
    *
    * \param[in] mat The input mat
    * \param[in] result The output feature vector result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const FDMat &mat, ClassifyResult *result);
 
@@ -91,7 +91,7 @@ public:
    *
    * \param[in] mats, The input mat list
    * \param[in] results The output feature vector result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<FDMat> &mats,
                             std::vector<ClassifyResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_postprocessor.h

@@ -30,7 +30,7 @@ public:
    *
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of feature vector (see
-   * ClassifyResult.feature member) \return true if the postprocess successed,
+   * ClassifyResult.feature member) \return true if the postprocess succeeded,
    * otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.h

@@ -38,7 +38,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
 

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/common/processors/base.h

@@ -65,7 +65,7 @@ public:
   /*! @brief operator `()` for calling processor in this way: `processor(mat)`
    *
    * \param[in] mat: The input mat
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   virtual bool operator()(FDMat *mat);
 
@@ -76,7 +76,7 @@ public:
    *
    * \param[in] mat: The input mat
    * \param[in] lib: The processing library, opencv, cv-cuda, flycv, etc.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   virtual bool operator()(FDMat *mat, ProcLib lib);
 
@@ -84,7 +84,7 @@ public:
    * `processor(mat_batch)`
    *
    * \param[in] mat_batch: The input mat batch
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   virtual bool operator()(FDMatBatch *mat_batch);
 };

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/cast.h

@@ -42,7 +42,7 @@ public:
    * \param[in] mat The input image data
    * \param[in] dtype type of data will be casted to
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, const std::string &dtype,
                   ProcLib lib = ProcLib::DEFAULT);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/center_crop.h

@@ -46,7 +46,7 @@ public:
    * \param[in] width width of data will be croped to
    * \param[in] height height of data will be croped to
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, const int &width, const int &height,
                   ProcLib lib = ProcLib::DEFAULT);
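
The processors in this directory follow the static-Run pattern documented above: the call mutates the FDMat in place and returns whether the process succeeded. A sketch for the center-crop processor, assuming the class in center_crop.h is named CenterCrop and lives in ultra_infer::vision:

// Headers from ultra_infer omitted; usage sketch only.
void CropSketch(ultra_infer::vision::FDMat *mat) {
  // Crop the image held by `mat` to 224x224 using the default processing
  // library (ProcLib::DEFAULT selects OpenCV/FlyCV/CV-CUDA as configured).
  if (!ultra_infer::vision::CenterCrop::Run(mat, 224, 224)) {
    // the process failed; `mat` should not be used further
  }
}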

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/common/processors/color_space_convert.h

@@ -33,7 +33,7 @@ public:
    *
    * \param[in] mat The input image data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
 };
@@ -52,7 +52,7 @@ public:
    *
    * \param[in] mat The input image data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
 };
@@ -71,7 +71,7 @@ public:
    *
    * \param[in] mat The input image data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
 };
@@ -90,7 +90,7 @@ public:
    *
    * \param[in] mat The input image data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, ProcLib lib = ProcLib::DEFAULT);
 };

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/convert.h

@@ -38,7 +38,7 @@ public:
    * \param[in] alpha The alpha channel data
    * \param[in] beta The beta channel data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, const std::vector<float> &alpha,
                   const std::vector<float> &beta,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/convert_and_permute.h

@@ -38,7 +38,7 @@ public:
    * \param[in] alpha The alpha channel data
    * \param[in] beta The beta channel data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, const std::vector<float> &alpha,
                   const std::vector<float> &beta, bool swap_rb = false,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/crop.h

@@ -45,7 +45,7 @@ public:
    * \param[in] width The width of the output image.
    * \param[in] height The height of the output image.
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, int offset_w, int offset_h, int width, int height,
                   ProcLib lib = ProcLib::DEFAULT);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/hwc2chw.h

@@ -41,7 +41,7 @@ public:
    *
    * \param[in] mat The input image data
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, ProcLib lib = ProcLib::DEFAULT);
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/limit_by_stride.h

@@ -41,7 +41,7 @@ public:
    * \param[in] stride limit image stride, default is 32
    * \param[in] interp interpolation method, default is 1
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, int stride = 32, int interp = 1,
                   ProcLib lib = ProcLib::DEFAULT);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/limit_short.h

@@ -47,7 +47,7 @@ public:
    * \param[in] min_short target size of short edge
    * \param[in] interp interpolation method, default is 1
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, int max_short = -1, int min_short = -1,
                   int interp = 1, ProcLib lib = ProcLib::DEFAULT);

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/common/processors/manager.h

@@ -32,7 +32,7 @@ public:
    *
    * \param[in] enable_cv_cuda true: use CV-CUDA, false: use CUDA only
    * \param[in] gpu_id GPU device id
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   void UseCuda(bool enable_cv_cuda = false, int gpu_id = -1);
 
@@ -67,7 +67,7 @@ public:
    *
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
-   * runtime \return true if the preprocess successed, otherwise false
+   * runtime \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs);
 
@@ -76,7 +76,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch,
                      std::vector<FDTensor> *outputs) = 0;
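
manager.h defines the preprocessing manager interface: UseCuda() selects the device, Run() drives a list of images, and Apply() is the pure virtual hook that a concrete preprocessor overrides. A sketch of a derived preprocessor, assuming the base class is named ProcessorManager in ultra_infer::vision (only the member signatures are taken from these hunks):

// Headers from ultra_infer omitted; usage sketch only.
class MyPreprocessor : public ultra_infer::vision::ProcessorManager {
public:
  // Called by Run(); turns the batch of mats into the tensors fed to runtime.
  bool Apply(ultra_infer::vision::FDMatBatch *image_batch,
             std::vector<ultra_infer::FDTensor> *outputs) override {
    // ... run resize/normalize/etc. on image_batch and fill `outputs` ...
    return true;  // true means the preprocess succeeded
  }
};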

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/normalize.h

@@ -60,7 +60,7 @@ public:
    * \param[in] min min value vector to be in target image
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
    * \param[in] swap_rb to define whether to swap r and b channel order
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, const std::vector<float> &mean,
                   const std::vector<float> &std, bool is_scale = true,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/normalize_and_permute.h

@@ -60,7 +60,7 @@ public:
    * \param[in] min min value vector to be in target image
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
    * \param[in] swap_rb to define whether to swap r and b channel order
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, const std::vector<float> &mean,
                   const std::vector<float> &std, bool is_scale = true,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/pad.h

@@ -54,7 +54,7 @@ public:
    * \param[in] right right pad size of the output image.
    * \param[in] value value vector used by padding of the output image.
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, const int &top, const int &bottom, const int &left,
                   const int &right, const std::vector<float> &value,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/pad_to_size.h

@@ -50,7 +50,7 @@ public:
    * \param[in] height height of the output image.
    * \param[in] value value vector used by padding of the output image.
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, int width, int height,
                   const std::vector<float> &value,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/resize.h

@@ -57,7 +57,7 @@ public:
    * \param[in] interp interpolation method, default is 1.
    * \param[in] use_scale to define whether to scale the image, default is
    * true. \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(FDMat *mat, int width, int height, float scale_w = -1.0,
                   float scale_h = -1.0, int interp = 1, bool use_scale = false,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h

@@ -53,7 +53,7 @@ public:
    * \param[in] use_scale to define whether to scale the image, default is
    * true. \param[in] max_hw max HW of output image. \param[in] lib to define
    * OpenCV or FlyCV or CVCUDA will be used. \return true if the process
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   static bool Run(FDMat *mat, int target_size, int interp = 1,
                   bool use_scale = true,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/stride_pad.h

@@ -48,7 +48,7 @@ public:
    * \param[in] stride stride of the padding.
    * \param[in] value value vector used by padding of the output image.
    * \param[in] lib to define OpenCV or FlyCV or CVCUDA will be used.
-   * \return true if the process successed, otherwise false
+   * \return true if the process succeeded, otherwise false
    */
   static bool Run(Mat *mat, int stride,
                   const std::vector<float> &value = std::vector<float>(),

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/fastestdet.h

@@ -46,7 +46,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -54,7 +54,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/fastestdet/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h

@@ -51,7 +51,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.35 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.35f,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/postprocessor.h

@@ -34,7 +34,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/preprocessor.h

@@ -32,7 +32,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/rknpu2/rkyolo.h

@@ -35,7 +35,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -43,7 +43,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h

@@ -48,7 +48,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h

@@ -45,7 +45,7 @@ public:
    * \param[in] result The output detection result will be written to this
    * structure \param[in] conf_threshold confidence threshold for
    * postprocessing, default is 0.25 \param[in] nms_iou_threshold iou threshold
-   * for NMS, default is 0.5 \return true if the prediction successed, otherwise
+   * for NMS, default is 0.5 \return true if the prediction succeeded, otherwise
    * false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h

@@ -49,7 +49,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \param[in] nms_threshold iou
    * threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25, float nms_threshold = 0.5);
@@ -59,7 +59,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -67,7 +67,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);
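
yolov5.h documents three entry points: the legacy Predict with explicit conf/NMS thresholds, the simpler single-image Predict, and BatchPredict. A call sketch, assuming the class is ultra_infer::vision::detection::YOLOv5 (the name is not visible in these hunks):

// Headers from ultra_infer and OpenCV omitted; usage sketch only.
void DetectSketch(ultra_infer::vision::detection::YOLOv5 &model) {
  cv::Mat img = cv::imread("test.jpg");  // 3-D HWC, BGR, as documented
  ultra_infer::vision::DetectionResult result;

  // Legacy overload with the documented defaults: conf 0.25, NMS IoU 0.5.
  model.Predict(&img, &result, 0.25f, 0.5f);

  // Simpler overload and batch prediction.
  model.Predict(img, &result);
  std::vector<cv::Mat> imgs = {img};
  std::vector<ultra_infer::vision::DetectionResult> results;
  model.BatchPredict(imgs, &results);  // one result per input image
}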

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h

@@ -48,7 +48,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.45 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.25 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.45,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5seg/yolov5seg.h

@@ -46,7 +46,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -54,7 +54,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h

@@ -51,7 +51,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/yolov7.h

@@ -49,7 +49,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \param[in] nms_threshold iou
    * threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25, float nms_threshold = 0.5);
@@ -59,7 +59,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -67,7 +67,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7end2end_ort.h

@@ -46,7 +46,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \return true if the
-   * prediction successed, otherwise false
+   * prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7end2end_trt.h

@@ -48,7 +48,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \return true if the
-   * prediction successed, otherwise false
+   * prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/postprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &tensors,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/preprocessor.h

@@ -33,7 +33,7 @@ public:
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
    * runtime \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the preprocess successed, otherwise false
+   * output_shape \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov8/yolov8.h

@@ -46,7 +46,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &img, DetectionResult *result);
 
@@ -54,7 +54,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolox.h

@@ -48,7 +48,7 @@ public:
    * will be written to this structure \param[in] conf_threshold confidence
    * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result,
                        float conf_threshold = 0.25,

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/detection/ppdet/base.h

@@ -64,21 +64,21 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result);
 
   /** \brief Predict the detection result for an input image
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, DetectionResult *result);
 
   /** \brief Predict the detection result for an input image list
    * \param[in] im The input image list, all the elements come from
    * cv::imread(), is a 3-D array with layout HWC, BGR format \param[in] results
-   * The output detection result list \return true if the prediction successed,
+   * The output detection result list \return true if the prediction succeeded,
    * otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/ppdet/postprocessor.h

@@ -50,7 +50,7 @@ public:
    *
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of detection
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<DetectionResult> *result);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/ppdet/preprocessor.h

@@ -39,7 +39,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facealign/contrib/face_landmark_1000.h

@@ -47,7 +47,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceAlignmentResult *result);
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facealign/contrib/pfld.h

@@ -45,7 +45,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceAlignmentResult *result);
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facealign/contrib/pipnet.h

@@ -46,7 +46,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceAlignmentResult *result);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/facedet/contrib/centerface/centerface.h

@@ -49,7 +49,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, FaceDetectionResult *result);
 
@@ -57,7 +57,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<FaceDetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/centerface/postprocessor.h

@@ -34,7 +34,7 @@ public:
    * \param[in] infer_result The inference result from runtime
    * \param[in] results The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &infer_result,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/retinaface.h

@@ -51,7 +51,7 @@ public:
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
    * nms_iou_threshold iou threshold for NMS, default is 0.4 \return true if
-   * the prediction successed, otherwise false
+   * the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,
                        float conf_threshold = 0.25f,
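
The face detectors in this directory share the same Predict contract: write a FaceDetectionResult and return whether the prediction succeeded, with per-model default conf/NMS thresholds (0.25/0.4 for the detector above). A call sketch, assuming the class in retinaface.h is named ultra_infer::vision::facedet::RetinaFace:

// Headers from ultra_infer and OpenCV omitted; usage sketch only.
void DetectFacesSketch(ultra_infer::vision::facedet::RetinaFace &model) {
  cv::Mat img = cv::imread("face.jpg");
  ultra_infer::vision::FaceDetectionResult result;
  // Keep the documented defaults explicit: conf 0.25, NMS IoU 0.4.
  if (model.Predict(&img, &result, 0.25f, 0.4f)) {
    // `result` now holds the boxes and landmarks of the detected faces
  }
}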

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/scrfd.h

@@ -48,7 +48,7 @@ public:
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
    * nms_iou_threshold iou threshold for NMS, default is 0.4 \return true if
-   * the prediction successed, otherwise false
+   * the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,
                        float conf_threshold = 0.25f,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/ultraface.h

@@ -50,7 +50,7 @@ public:
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.7 \param[in]
    * nms_iou_threshold iou threshold for NMS, default is 0.3 \return true if
-   * the prediction successed, otherwise false
+   * the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,
                        float conf_threshold = 0.7f,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov5face.h

@@ -48,7 +48,7 @@ public:
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
    * nms_iou_threshold iou threshold for NMS, default is 0.5 \return true if
-   * the prediction successed, otherwise false
+   * the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,
                        float conf_threshold = 0.25,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov7face/postprocessor.h

@@ -34,7 +34,7 @@ public:
    * \param[in] infer_result The inference result from runtime
    * \param[in] results The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &infer_result,

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov7face/yolov7face.h

@@ -49,7 +49,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, FaceDetectionResult *result);
 
@@ -57,7 +57,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<FaceDetectionResult> *results);
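The batch overload documented here takes a plain std::vector<cv::Mat>. A hedged sketch follows; the helper name and template parameter are illustrative only, and "one result per input image" is an assumption rather than something this comment states.

template <typename FaceDetector>
bool DetectFacesBatch(FaceDetector &model,
                      const std::vector<std::string> &paths) {
  std::vector<cv::Mat> images;
  for (const auto &p : paths) images.push_back(cv::imread(p));
  std::vector<ultra_infer::vision::FaceDetectionResult> results;
  // results presumably holds one entry per input image on success.
  return model.BatchPredict(images, &results);
}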

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/blazeface.h

@@ -52,7 +52,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   bool Predict(const cv::Mat &im, FaceDetectionResult *result);
 
@@ -60,7 +60,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output detection result list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<FaceDetectionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/postprocessor.h

@@ -34,7 +34,7 @@ public:
    * \param[in] infer_result The inference result from runtime
    * \param[in] results The output result of detection
    * \param[in] ims_info The shape info list, record input_shape and
-   * output_shape \return true if the postprocess successed, otherwise false
+   * output_shape \return true if the postprocess succeeded, otherwise false
    */
   bool
   Run(const std::vector<FDTensor> &infer_result,

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/adaface.h

@@ -46,7 +46,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output
    * FaceRecognitionResult will be written to this structure \return true if the
-   * prediction successed, otherwise false
+   * prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, FaceRecognitionResult *result);
 
@@ -54,7 +54,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output FaceRecognitionResult list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<FaceRecognitionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/postprocessor.h

@@ -33,7 +33,7 @@ public:
    *
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of FaceRecognitionResult
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDTensor> &infer_result,
            std::vector<FaceRecognitionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/faceid/contrib/adaface/preprocessor.h

@@ -32,7 +32,7 @@ public:
    *
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
-   * runtime \return true if the preprocess successed, otherwise false
+   * runtime \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/base.h

@@ -47,7 +47,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format \param[in] result The output
    * FaceRecognitionResult will be written to this structure \return true if the
-   * prediction successed, otherwise false
+   * prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, FaceRecognitionResult *result);
 
@@ -55,7 +55,7 @@ public:
    *
    * \param[in] imgs, The input image list, each element comes from cv::imread()
    * \param[in] results The output FaceRecognitionResult list
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<FaceRecognitionResult> *results);
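For the recognition models declared in this header, a hedged sketch of the batch call on a pair of face crops. The fields of FaceRecognitionResult (e.g. an embedding) are not visible in this hunk, so the actual comparison step is omitted, and the qualified result namespace is assumed.

template <typename RecognitionModel>
bool EmbedFacePair(RecognitionModel &model, const cv::Mat &face_a,
                   const cv::Mat &face_b) {
  std::vector<cv::Mat> faces = {face_a, face_b};
  std::vector<ultra_infer::vision::FaceRecognitionResult> results;
  // Comparing the two embeddings would use fields not shown in this diff.
  return model.BatchPredict(faces, &results);
}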

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/postprocessor.h

@@ -34,7 +34,7 @@ public:
    *
    * \param[in] tensors The inference result from runtime
    * \param[in] result The output result of FaceRecognitionResult
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDTensor> &infer_result,
            std::vector<FaceRecognitionResult> *results);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/faceid/contrib/insightface/preprocessor.h

@@ -33,7 +33,7 @@ public:
    *
    * \param[in] images The input image data list, all the elements are returned
    * by cv::imread() \param[in] outputs The output tensors which will feed in
-   * runtime \return true if the preprocess successed, otherwise false
+   * runtime \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/generation/contrib/animegan.h

@@ -47,7 +47,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output style transfer
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   bool Predict(cv::Mat &img, cv::Mat *result);
 
@@ -56,7 +56,7 @@ public:
    * \param[in] images The list of input images, each element comes from
    * cv::imread(), is a 3-D array with layout HWC, BGR format \param[in] results
    * The list of output style transfer results will be written to this structure
-   * \return true if the batch prediction successed, otherwise false
+   * \return true if the batch prediction succeeded, otherwise false
    */
   bool BatchPredict(const std::vector<cv::Mat> &images,
                     std::vector<cv::Mat> *results);
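A hedged sketch of the batch style-transfer call above; since the results come back as plain cv::Mat, they can be written straight to disk. The helper name and output file names are hypothetical.

template <typename Generator>
bool StylizeAll(Generator &model, const std::vector<cv::Mat> &images) {
  std::vector<cv::Mat> results;
  if (!model.BatchPredict(images, &results)) return false;
  for (size_t i = 0; i < results.size(); ++i) {
    // Hypothetical output naming; any writable path works.
    cv::imwrite("stylized_" + std::to_string(i) + ".jpg", results[i]);
  }
  return true;
}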

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/generation/contrib/postprocessor.h

@@ -32,7 +32,7 @@ public:
    *
    * \param[in] infer_results The inference results from runtime
    * \param[in] results The output results of style transfer
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDTensor> &infer_results, std::vector<cv::Mat> *results);
 };

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/generation/contrib/preprocessor.h

@@ -32,7 +32,7 @@ public:
    *
    * \param[in] images The input image data list, all the elements are returned
    * wrapped by FDMat. \param[in] output The output tensors which will feed in
-   * runtime \return true if the preprocess successed, otherwise false
+   * runtime \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<Mat> &images, std::vector<FDTensor> *output);
 };

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/headpose/contrib/fsanet.h

@@ -46,7 +46,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \return true if the prediction
-   * successed, otherwise false
+   * succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, HeadPoseResult *result);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/keypointdet/pptinypose/pptinypose.h

@@ -55,7 +55,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread()
    * \param[in] result The output keypoint detection result will be written to
-   * this structure \return true if the keypoint prediction successed, otherwise
+   * this structure \return true if the keypoint prediction succeeded, otherwise
    * false
    */
   bool Predict(cv::Mat *im, KeyPointDetectionResult *result);
@@ -67,7 +67,7 @@ public:
    * \param[in] result The output keypoint detection result will be written to
    * this structure \param[in] detection_result The structure stores pedestrian
    * detection result, which is used to crop image for multi-persons keypoint
-   * detection \return true if the keypoint prediction successed, otherwise
+   * detection \return true if the keypoint prediction succeeded, otherwise
    * false
    */
   bool Predict(cv::Mat *im, KeyPointDetectionResult *result,
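A hedged sketch of the single-person overload documented first. The multi-person overload additionally takes the pedestrian detection result described above, but its parameter list is cut off in this hunk, so it is only referenced in a comment; the template parameter and result namespace are assumptions.

template <typename KeypointModel>
bool EstimatePose(KeypointModel &model, cv::Mat &im) {
  ultra_infer::vision::KeyPointDetectionResult result;
  // For multi-person images, the second overload above also takes the
  // pedestrian detection result used to crop each person before inference.
  return model.Predict(&im, &result);
}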

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/matting/contrib/modnet.h

@@ -67,7 +67,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output matting result
-   * will be written to this structure \return true if the prediction successed,
+   * will be written to this structure \return true if the prediction succeeded,
    * otherwise false
    */
   bool Predict(cv::Mat *im, MattingResult *result);

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/matting/contrib/rvm.h

@@ -52,7 +52,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread()
    * \param[in] result The output matting result will be written to this
-   * structure \return true if the prediction successed, otherwise false
+   * structure \return true if the prediction succeeded, otherwise false
    */
   bool Predict(cv::Mat *im, MattingResult *result);
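RVM is typically driven frame by frame over a video, which is an assumption about intended use rather than something this hunk states. A hedged sketch using OpenCV's VideoCapture, with the helper name being illustrative only:

template <typename VideoMatting>
void MatteVideo(VideoMatting &model, const std::string &video_path) {
  cv::VideoCapture cap(video_path);
  cv::Mat frame;
  while (cap.read(frame)) {
    ultra_infer::vision::MattingResult result;
    if (!model.Predict(&frame, &result)) break;  // stop on the first failure
    // Consuming the matte would use MattingResult fields not shown here.
  }
}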
 

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/matting/ppmatting/ppmatting.h

@@ -49,7 +49,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output matting result
-   * will be written to this structure \return true if the prediction successed,
+   * will be written to this structure \return true if the prediction succeeded,
    * otherwise false
    */
   virtual bool Predict(cv::Mat *im, MattingResult *result);

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/classifier.h

@@ -64,7 +64,7 @@ public:
    * array with layout HWC, BGR format. \param[in] cls_label The label result of
    * cls model will be written in to this param. \param[in] cls_score The score
    * result of cls model will be written in to this param. \return true if the
-   * prediction is successed, otherwise false.
+   * prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, int32_t *cls_label,
                        float *cls_score);
@@ -74,7 +74,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] ocr_result The output of OCR
    * recognition model result will be written to this structure. \return true if
-   * the prediction is successed, otherwise false.
+   * the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, vision::OCRResult *ocr_result);
 
@@ -84,7 +84,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] ocr_result The output of OCR
    * classification model result will be written to this structure. \return true
-   * if the prediction is successed, otherwise false.
+   * if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             vision::OCRResult *ocr_result);
@@ -96,7 +96,7 @@ public:
    * a 3-D array with layout HWC, BGR format. \param[in] cls_labels The label
    * results of cls model will be written in to this vector. \param[in]
    * cls_scores The score results of cls model will be written in to this
-   * vector. \return true if the prediction is successed, otherwise false.
+   * vector. \return true if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<int32_t> *cls_labels,
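The single-image classifier overload is fully visible above: the label and score are written through out-parameters. A hedged sketch, where the helper name and the <iostream>/<cstdint> usage are additions of this example:

template <typename TextClassifier>
bool ClassifyOrientation(TextClassifier &model, const cv::Mat &img) {
  int32_t label = 0;
  float score = 0.0f;
  if (!model.Predict(img, &label, &score)) return false;
  std::cout << "cls label " << label << ", score " << score << std::endl;
  return true;
}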

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/cls_postprocessor.h

@@ -30,7 +30,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] cls_labels The output label results of classification model
    * \param[in] cls_scores The output score results of classification model
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<int32_t> *cls_labels, std::vector<float> *cls_scores);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/cls_preprocessor.h

@@ -31,7 +31,7 @@ public:
    *
    * \param[in] images The input data list, all the elements are FDMat
    * \param[in] outputs The output tensors which will be fed into runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            size_t start_index, size_t end_index);
@@ -42,7 +42,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
 

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/dbcurvedetector.h

@@ -64,7 +64,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] boxes_result The output of
    * OCR detection model result will be written to this structure. \return true
-   * if the prediction is successed, otherwise false.
+   * if the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img,
                        std::vector<std::vector<int>> *boxes_result);
@@ -74,7 +74,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] ocr_result The output of OCR
    * detection model result will be written to this structure. \return true if
-   * the prediction is successed, otherwise false.
+   * the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, vision::OCRCURVEResult *ocr_result);
 
@@ -83,7 +83,7 @@ public:
    * \param[in] images The list input of image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] det_results The output
    * of OCR detection model result will be written to this structure. \return
-   * true if the prediction is successed, otherwise false.
+   * true if the prediction is succeeded, otherwise false.
    */
   virtual bool
   BatchPredict(const std::vector<cv::Mat> &images,
@@ -94,7 +94,7 @@ public:
    * \param[in] images The list input of image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] ocr_results The output
    * of OCR detection model result will be written to this structure. \return
-   * true if the prediction is successed, otherwise false.
+   * true if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<vision::OCRCURVEResult> *ocr_results);

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/dbdetector.h

@@ -63,7 +63,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] boxes_result The output of
    * OCR detection model result will be written to this structure. \return true
-   * if the prediction is successed, otherwise false.
+   * if the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img,
                        std::vector<std::array<int, 8>> *boxes_result);
@@ -73,7 +73,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] ocr_result The output of OCR
    * detection model result will be written to this structure. \return true if
-   * the prediction is successed, otherwise false.
+   * the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, vision::OCRResult *ocr_result);
 
@@ -82,7 +82,7 @@ public:
    * \param[in] images The list input of image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] det_results The output
    * of OCR detection model result will be written to this structure. \return
-   * true if the prediction is successed, otherwise false.
+   * true if the prediction is succeeded, otherwise false.
    */
   virtual bool
   BatchPredict(const std::vector<cv::Mat> &images,
@@ -93,7 +93,7 @@ public:
    * \param[in] images The list input of image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] ocr_results The output
    * of OCR detection model result will be written to this structure. \return
-   * true if the prediction is successed, otherwise false.
+   * true if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<vision::OCRResult> *ocr_results);
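The quad-box overload above returns each text region as eight ints, presumably the x/y pairs of its four corners. A hedged sketch that only reports the count; the helper name is illustrative and <array>/<iostream> are assumed to be included.

template <typename TextDetector>
bool DetectTextBoxes(TextDetector &model, const cv::Mat &img) {
  std::vector<std::array<int, 8>> boxes;
  if (!model.Predict(img, &boxes)) return false;
  std::cout << boxes.size() << " text boxes detected" << std::endl;
  return true;
}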

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_postprocessor.h

@@ -30,7 +30,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] results The output result of detector
    * \param[in] batch_det_img_info The detector_preprocess result
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<std::vector<std::array<int, 8>>> *results,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_postprocessor_curve.h

@@ -30,7 +30,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] results The output result of detector
    * \param[in] batch_det_img_info The detector_preprocess result
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<std::vector<std::vector<int>>> *results,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/det_preprocessor.h

@@ -33,7 +33,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
 

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/ppocr_v2.h

@@ -69,7 +69,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format. \param[in] result The output OCR result will
-   * be written to this structure. \return true if the prediction successed,
+   * be written to this structure. \return true if the prediction succeeded,
    * otherwise false.
    */
   virtual bool Predict(cv::Mat *img, ultra_infer::vision::OCRResult *result);
@@ -80,7 +80,7 @@ public:
    * \param[in] images The list of input image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] batch_result The output
    * list of OCR result will be written to this structure. \return true if the
-   * prediction successed, otherwise false.
+   * prediction succeeded, otherwise false.
    */
   virtual bool
   BatchPredict(const std::vector<cv::Mat> &images,
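A hedged sketch of the end-to-end pipeline call above. The pipeline object must already be wired to its detector, classifier, and recognizer; that setup is outside this hunk, and the template parameter stands in for the class declared in this header.

template <typename OcrPipeline>
bool RunOcr(OcrPipeline &pipeline, cv::Mat &img) {
  ultra_infer::vision::OCRResult result;
  // Reading texts and boxes out of OCRResult uses fields not shown in this diff.
  return pipeline.Predict(&img, &result);
}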

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/ppstructurev2_table.h

@@ -61,7 +61,7 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format. \param[in] result The output OCR result will
-   * be written to this structure. \return true if the prediction successed,
+   * be written to this structure. \return true if the prediction succeeded,
    * otherwise false.
    */
   virtual bool Predict(cv::Mat *img, ultra_infer::vision::OCRResult *result);
@@ -72,7 +72,7 @@ public:
    * \param[in] images The list of input image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] batch_result The output
    * list of OCR result will be written to this structure. \return true if the
-   * prediction successed, otherwise false.
+   * prediction succeeded, otherwise false.
    */
   virtual bool
   BatchPredict(const std::vector<cv::Mat> &images,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/rec_postprocessor.h

@@ -37,7 +37,7 @@ public:
    * \param[in] tensors The inference result from runtime
    * \param[in] texts The output text results of recognizer
    * \param[in] rec_scores The output score results of recognizer
-   * \return true if the postprocess successed, otherwise false
+   * \return true if the postprocess succeeded, otherwise false
    */
   bool Run(const std::vector<FDTensor> &tensors,
            std::vector<std::string> *texts, std::vector<float> *rec_scores);

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/rec_preprocessor.h

@@ -31,7 +31,7 @@ public:
    *
    * \param[in] images The input data list, all the elements are FDMat
    * \param[in] outputs The output tensors which will be fed into runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
            size_t start_index, size_t end_index,
@@ -43,7 +43,7 @@ public:
    *
    * \param[in] image_batch The input image batch
    * \param[in] outputs The output tensors which will feed in runtime
-   * \return true if the preprocess successed, otherwise false
+   * \return true if the preprocess succeeded, otherwise false
    */
   virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
 

+ 4 - 4
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/recognizer.h

@@ -65,7 +65,7 @@ public:
    * array with layout HWC, BGR format. \param[in] text The text result of rec
    * model will be written into this parameter. \param[in] rec_score The sccore
    * result of rec model will be written into this parameter. \return true if
-   * the prediction is successed, otherwise false.
+   * the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, std::string *text, float *rec_score);
 
@@ -74,7 +74,7 @@ public:
    * \param[in] img The input image data, comes from cv::imread(), is a 3-D
    * array with layout HWC, BGR format. \param[in] ocr_result The output of OCR
    * recognition model result will be written to this structure. \return true if
-   * the prediction is successed, otherwise false.
+   * the prediction is succeeded, otherwise false.
    */
   virtual bool Predict(const cv::Mat &img, vision::OCRResult *ocr_result);
 
@@ -83,7 +83,7 @@ public:
    * \param[in] images The list of input image data, comes from cv::imread(), is
    * a 3-D array with layout HWC, BGR format. \param[in] ocr_result The output
    * of OCR recognition model result will be written to this structure. \return
-   * true if the prediction is successed, otherwise false.
+   * true if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             vision::OCRResult *ocr_result);
@@ -94,7 +94,7 @@ public:
    * a 3-D array with layout HWC, BGR format. \param[in] texts The list of text
    * results of rec model will be written into this vector. \param[in]
    * rec_scores The list of sccore result of rec model will be written into this
-   * vector. \return true if the prediction is successed, otherwise false.
+   * vector. \return true if the prediction is succeeded, otherwise false.
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &images,
                             std::vector<std::string> *texts,
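The single-crop overload near the top of this hunk is fully shown: the recognized text and its score come back through out-parameters. A hedged sketch, with the helper name and printing being additions of this example:

template <typename TextRecognizer>
bool RecognizeCrop(TextRecognizer &model, const cv::Mat &text_crop) {
  std::string text;
  float score = 0.0f;
  if (!model.Predict(text_crop, &text, &score)) return false;
  std::cout << text << " (" << score << ")" << std::endl;
  return true;
}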

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/ocr/ppocr/structurev2_layout.h

@@ -60,21 +60,21 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(cv::Mat *im, DetectionResult *result);
 
   /** \brief Predict the detection result for an input image
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
-   * \return true if the prediction successed, otherwise false
+   * \return true if the prediction succeeded, otherwise false
    */
   virtual bool Predict(const cv::Mat &im, DetectionResult *result);
 
   /** \brief Predict the detection result for an input image list
    * \param[in] im The input image list, all the elements come from
    * cv::imread(), is a 3-D array with layout HWC, BGR format \param[in] results
-   * The output detection result list \return true if the prediction successed,
+   * The output detection result list \return true if the prediction succeeded,
    * otherwise false
    */
   virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
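Layout analysis reuses the general DetectionResult type. A hedged sketch of the const-reference overload above; the template parameter stands in for the StructureV2 layout class declared in this header, and the result namespace is assumed.

template <typename LayoutModel>
bool AnalyzeLayout(LayoutModel &model, const cv::Mat &page_image) {
  ultra_infer::vision::DetectionResult result;
  return model.Predict(page_image, &result);
}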

Some files are not shown because too many files changed in this diff