Browse source code

Fix typos in multiple files (#3982)

co63oc 6 months ago
parent
commit
eb36f9ee42
95 changed files with 140 additions and 140 deletions
  1. + 1 - 1  docs/pipeline_usage/tutorials/cv_pipelines/3d_bev_detection.en.md
  2. + 1 - 1  docs/pipeline_usage/tutorials/ocr_pipelines/formula_recognition.en.md
  3. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/classification/contrib/resnet.py
  4. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/classification/contrib/yolov5cls.py
  5. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/fastestdet.py
  6. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py
  7. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/rkyolo/rkyolov5.py
  8. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py
  9. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolor.py
  10. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5.py
  11. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py
  12. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py
  13. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov6.py
  14. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7.py
  15. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_ort.py
  16. + 2 - 2  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_trt.py
  17. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov8.py
  18. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolox.py
  19. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/evaluation/detection.py
  20. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/face_landmark_1000.py
  21. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pfld.py
  22. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pipnet.py
  23. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/centerface.py
  24. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/retinaface.py
  25. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/scrfd.py
  26. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/ultraface.py
  27. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov5face.py
  28. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov7face.py
  29. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/headpose/contrib/fsanet.py
  30. + 3 - 3  libs/ultra-infer/python/ultra_infer/vision/matting/contrib/modnet.py
  31. + 1 - 1  libs/ultra-infer/python/ultra_infer/vision/matting/contrib/rvm.py
  32. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/grid_sample_3d.cu
  33. + 2 - 2  libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/option.h
  34. + 1 - 1  libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.cc
  35. + 1 - 1  libs/ultra-infer/ultra_infer/ultra_infer_model.cc
  36. + 1 - 1  libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h
  37. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h
  38. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h
  39. + 2 - 2  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h
  40. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h
  41. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h
  42. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h
  43. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/yolov7.h
  44. + 1 - 1  libs/ultra-infer/ultra_infer/vision/detection/contrib/yolox.h
  45. + 1 - 1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/retinaface.h
  46. + 1 - 1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/scrfd.h
  47. + 1 - 1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/ultraface.h
  48. + 1 - 1  libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov5face.h
  49. + 1 - 1  paddlex/inference/common/result/mixin.py
  50. + 2 - 2  paddlex/inference/models/common/tokenizer/tokenizer_utils.py
  51. + 1 - 1  paddlex/inference/models/text_detection/processors.py
  52. + 1 - 1  paddlex/inference/pipelines/components/faisser.py
  53. + 2 - 2  paddlex/inference/pipelines/layout_parsing/pipeline_v2.py
  54. + 1 - 1  paddlex/inference/pipelines/layout_parsing/result_v2.py
  55. + 2 - 2  paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py
  56. + 1 - 1  paddlex/inference/utils/pp_option.py
  57. + 4 - 4  paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py
  58. + 1 - 1  paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py
  59. + 2 - 2  paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py
  60. + 1 - 1  paddlex/repo_apis/PaddleClas_api/cls/config.py
  61. + 1 - 1  paddlex/repo_apis/PaddleClas_api/cls/model.py
  62. + 1 - 1  paddlex/repo_apis/PaddleClas_api/cls/runner.py
  63. + 1 - 1  paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_base.yaml
  64. + 1 - 1  paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_large.yaml
  65. + 1 - 1  paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_small.yaml
  66. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py
  67. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py
  68. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/object_det/config.py
  69. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/object_det/model.py
  70. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/object_det/runner.py
  71. + 3 - 3  paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py
  72. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py
  73. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py
  74. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/table_rec/model.py
  75. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py
  76. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/text_det/model.py
  77. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/text_det/runner.py
  78. + 3 - 3  paddlex/repo_apis/PaddleOCR_api/text_rec/config.py
  79. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/text_rec/model.py
  80. + 1 - 1  paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py
  81. + 1 - 1  paddlex/repo_apis/PaddleSeg_api/seg/model.py
  82. + 1 - 1  paddlex/repo_apis/PaddleSeg_api/seg/runner.py
  83. + 3 - 3  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml
  84. + 3 - 3  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml
  85. + 3 - 3  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml
  86. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_cls/config.py
  87. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_cls/model.py
  88. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py
  89. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_det/config.py
  90. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_det/model.py
  91. + 1 - 1  paddlex/repo_apis/PaddleVideo_api/video_det/runner.py
  92. + 1 - 1  paddlex/repo_apis/base/config.py
  93. + 2 - 2  paddlex/repo_manager/core.py
  94. + 1 - 1  paddlex/repo_manager/repo.py
  95. + 1 - 1  paddlex/utils/misc.py

+ 1 - 1
docs/pipeline_usage/tutorials/cv_pipelines/3d_bev_detection.en.md

@@ -109,7 +109,7 @@ Parameter description:
 ```
 --pipeline: The name of the pipeline, here it is the 3D multi-modal fusion detection pipeline.
 
---input: The input path to the .tar file containing image and lidar data to be processed. 3D multi-modal fusion detection pipeline is a multi-input pipeline depending on images, pointclouds and transition matrix information. Tar file contains "samples" directory with all images and pointclouds data, "sweeps" directories with pointclouds data of relative frames and nuscnes_infos_val.pkl file containing relataive data path from "samples" and "sweeps" directories and transition matrix infomation.
+--input: The input path to the .tar file containing image and lidar data to be processed. 3D multi-modal fusion detection pipeline is a multi-input pipeline depending on images, pointclouds and transition matrix information. Tar file contains "samples" directory with all images and pointclouds data, "sweeps" directories with pointclouds data of relative frames and nuscnes_infos_val.pkl file containing relative data path from "samples" and "sweeps" directories and transition matrix information.
 
 --device: The GPU index to be used (e.g., gpu:0 means using the 0th GPU, gpu:1,2 means using the 1st and 2nd GPUs), or you can choose to use CPU (--device cpu).
 ```

+ 1 - 1
docs/pipeline_usage/tutorials/ocr_pipelines/formula_recognition.en.md

@@ -292,7 +292,7 @@ You can [experience the formula recognition pipeline online](https://aistudio.ba
 If you are satisfied with the performance of the pipeline, you can directly integrate and deploy it. You can choose to download the deployment package from the cloud, or refer to the methods in [Section 2.2 Local Experience](#22-local-experience) for local deployment. If you are not satisfied with the effect, you can <b>fine-tune the models in the pipeline using your private data</b>. If you have local hardware resources for training, you can start training directly on your local machine; if not, the Star River Zero-Code platform provides a one-click training service. You don't need to write any code—just upload your data and start the training task with one click.
 
 ### 2.2 Local Experience
-> ❗ Before using the formula recognition pipelin locally, please ensure that you have completed the installation of the PaddleX wheel package according to the [PaddleX Installation Guide](../../../installation/installation.en.md). If you wish to selectively install dependencies, please refer to the relevant instructions in the installation guide. The dependency group corresponding to this pipeline is `ocr`.
+> ❗ Before using the formula recognition pipeline locally, please ensure that you have completed the installation of the PaddleX wheel package according to the [PaddleX Installation Guide](../../../installation/installation.en.md). If you wish to selectively install dependencies, please refer to the relevant instructions in the installation guide. The dependency group corresponding to this pipeline is `ocr`.
 
 #### 2.2.1 Command Line Experience
 You can quickly experience the effect of the formula recognition pipeline with one command. Use the [test file](https://paddle-model-ecology.bj.bcebos.com/paddlex/demo_image/pipelines/general_formula_recognition_001.png), and replace `--input` with the local path for prediction.

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/classification/contrib/resnet.py

@@ -84,7 +84,7 @@ class ResNet(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/classification/contrib/yolov5cls.py

@@ -45,7 +45,7 @@ class YOLOv5ClsPreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/fastestdet.py

@@ -45,7 +45,7 @@ class FastestDetPreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py

@@ -47,8 +47,8 @@ class NanoDetPlus(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -96,7 +96,7 @@ class NanoDetPlus(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/rkyolo/rkyolov5.py

@@ -60,7 +60,7 @@ class RKYOLOPreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py

@@ -47,8 +47,8 @@ class ScaledYOLOv4(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -100,7 +100,7 @@ class ScaledYOLOv4(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolor.py

@@ -47,8 +47,8 @@ class YOLOR(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -99,7 +99,7 @@ class YOLOR(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5.py

@@ -74,7 +74,7 @@ class YOLOv5Preprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py

@@ -47,8 +47,8 @@ class YOLOv5Lite(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -121,7 +121,7 @@ class YOLOv5Lite(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py

@@ -74,7 +74,7 @@ class YOLOv5SegPreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov6.py

@@ -47,8 +47,8 @@ class YOLOv6(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -99,7 +99,7 @@ class YOLOv6(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7.py

@@ -60,7 +60,7 @@ class YOLOv7Preprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_ort.py

@@ -47,7 +47,7 @@ class YOLOv7End2EndORT(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold)
@@ -93,7 +93,7 @@ class YOLOv7End2EndORT(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 2 - 2
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov7end2end_trt.py

@@ -47,7 +47,7 @@ class YOLOv7End2EndTRT(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold)
@@ -93,7 +93,7 @@ class YOLOv7End2EndTRT(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolov8.py

@@ -74,7 +74,7 @@ class YOLOv8Preprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/detection/contrib/yolox.py

@@ -47,8 +47,8 @@ class YOLOX(UltraInferModel):
         """Detect an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: DetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -96,7 +96,7 @@ class YOLOX(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/evaluation/detection.py

@@ -35,7 +35,7 @@ def eval_detection(
     if conf_threshold is not None or nms_iou_threshold is not None:
         assert (
             conf_threshold is not None and nms_iou_threshold is not None
-        ), "The conf_threshold and nms_iou_threshold should be setted at the same time"
+        ), "The conf_threshold and nms_iou_threshold should be set at the same time"
         assert isinstance(
             conf_threshold, (float, int)
         ), "The conf_threshold:{} need to be int or float".format(conf_threshold)

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/face_landmark_1000.py

@@ -70,7 +70,7 @@ class FaceLandmark1000(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pfld.py

@@ -70,7 +70,7 @@ class PFLD(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facealign/contrib/pipnet.py

@@ -91,7 +91,7 @@ class PIPNet(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/centerface.py

@@ -45,7 +45,7 @@ class CenterFacePreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/retinaface.py

@@ -47,8 +47,8 @@ class RetinaFace(UltraInferModel):
         """Detect the location and key points of human faces from an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.7
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.3
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.7
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.3
         :return: FaceDetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -97,7 +97,7 @@ class RetinaFace(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/scrfd.py

@@ -47,8 +47,8 @@ class SCRFD(UltraInferModel):
         """Detect the location and key points of human faces from an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.7
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.3
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.7
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.3
         :return: FaceDetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -144,7 +144,7 @@ class SCRFD(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/ultraface.py

@@ -47,8 +47,8 @@ class UltraFace(UltraInferModel):
         """Detect the location and key points of human faces from an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.7
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.3
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.7
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.3
         :return: FaceDetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -69,7 +69,7 @@ class UltraFace(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov5face.py

@@ -47,8 +47,8 @@ class YOLOv5Face(UltraInferModel):
         """Detect the location and key points of human faces from an input image
 
         :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
-        :param conf_threshold: confidence threashold for postprocessing, default is 0.25
-        :param nms_iou_threshold: iou threashold for NMS, default is 0.5
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
         :return: FaceDetectionResult
         """
         return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
@@ -101,7 +101,7 @@ class YOLOv5Face(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/facedet/contrib/yolov7face.py

@@ -60,7 +60,7 @@ class Yolov7FacePreprocessor:
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._preprocessor.size = wh

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/headpose/contrib/fsanet.py

@@ -70,7 +70,7 @@ class FSANet(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 3 - 3
libs/ultra-infer/python/ultra_infer/vision/matting/contrib/modnet.py

@@ -88,7 +88,7 @@ class MODNet(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh
@@ -100,7 +100,7 @@ class MODNet(UltraInferModel):
         ), "The value to set `alpha` must be type of tuple or list."
         assert (
             len(value) == 3
-        ), "The value to set `alpha` must contatins 3 elements for each channels, but now it contains {} elements.".format(
+        ), "The value to set `alpha` must contains 3 elements for each channels, but now it contains {} elements.".format(
             len(value)
         )
         self._model.alpha = value
@@ -112,7 +112,7 @@ class MODNet(UltraInferModel):
         ), "The value to set `beta` must be type of tuple or list."
         assert (
             len(value) == 3
-        ), "The value to set `beta` must contatins 3 elements for each channels, but now it contains {} elements.".format(
+        ), "The value to set `beta` must contains 3 elements for each channels, but now it contains {} elements.".format(
             len(value)
         )
         self._model.beta = value

+ 1 - 1
libs/ultra-infer/python/ultra_infer/vision/matting/contrib/rvm.py

@@ -79,7 +79,7 @@ class RobustVideoMatting(UltraInferModel):
         ), "The value to set `size` must be type of tuple or list."
         assert (
             len(wh) == 2
-        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+        ), "The value to set `size` must contains 2 elements means [width, height], but now it contains {} elements.".format(
             len(wh)
         )
         self._model.size = wh

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/paddle/ops/grid_sample_3d.cu

@@ -126,7 +126,7 @@ GridSample3DCudaKernel(const index_t nthreads, index_t out_c, index_t out_d,
     const index_t n = index / (out_d * out_h * out_w);
     const index_t grid_offset =
         n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
-    // get the corresponding input x, y, z co-ordinates from grid
+    // get the corresponding input x, y, z coordinates from grid
     T ix = grid[grid_offset];
     T iy = grid[grid_offset + grid_sCoor];
     T iz = grid[grid_offset + 2 * grid_sCoor];
@@ -427,7 +427,7 @@ __global__ void GridSample3DCudaBackwardKernel(
     const auto grid_offset =
         n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW;
 
-    // get the corresponding input x, y, z co-ordinates from grid
+    // get the corresponding input x, y, z coordinates from grid
     T ix = grid[grid_offset];
     T iy = grid[grid_offset + grid_sCoor];
     T iz = grid[grid_offset + 2 * grid_sCoor];

+ 2 - 2
libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/option.h

@@ -33,8 +33,8 @@ struct TrtBackendOption {
   /// Enable log while converting onnx model to tensorrt
   bool enable_log_info = false;
 
-  /// Enable half precison inference, on some device not support half precision,
-  /// it will fallback to float32 mode
+  /// Enable half precision inference, on some device not support half
+  /// precision, it will fallback to float32 mode
   bool enable_fp16 = false;
 
   /** \brief Set shape range of input tensor for the model that contain dynamic

+ 1 - 1
libs/ultra-infer/ultra_infer/runtime/backends/tensorrt/trt_backend.cc

@@ -32,7 +32,7 @@ FDTrtLogger *FDTrtLogger::logger = nullptr;
 // If the model has dynamic input shape, it will require defined shape
 // information We can set the shape range information by function
 // SetTrtInputShape() But if the shape range is not defined, then the engine
-// cannot build, in this case, The engine will build once there's data feeded,
+// cannot build, in this case, The engine will build once there's data fed,
 // and the shape range will be updated
 bool CanBuildEngine(
     const std::map<std::string, ShapeRangeInfo> &shape_range_info) {

+ 1 - 1
libs/ultra-infer/ultra_infer/ultra_infer_model.cc

@@ -206,7 +206,7 @@ bool UltraInferModel::InitRuntimeWithSpecifiedDevice() {
 
 bool UltraInferModel::InitRuntime() {
   if (runtime_initialized_) {
-    FDERROR << "The model is already initialized, cannot be initliazed again."
+    FDERROR << "The model is already initialized, cannot be initialized again."
             << std::endl;
     return false;
   }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/common/processors/resize_by_short.h

@@ -51,7 +51,7 @@ public:
    * \param[in] target_size target size of the output image.
    * \param[in] interp interpolation method, default is 1.
    * \param[in] use_scale to define whether to scale the image, default is
-   * true. \param[in] max_hw max HW fo output image. \param[in] lib to define
+   * true. \param[in] max_hw max HW of output image. \param[in] lib to define
    * OpenCV or FlyCV or CVCUDA will be used. \return true if the process
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/nanodet_plus.h

@@ -49,7 +49,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.35 \param[in] nms_iou_threshold
+   * threshold for postprocessing, default is 0.35 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/scaledyolov4.h

@@ -46,7 +46,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
+   * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 2 - 2
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolor.h

@@ -43,8 +43,8 @@ public:
    *
    * \param[in] im The input image data, comes from cv::imread()
    * \param[in] result The output detection result will be written to this
-   * structure \param[in] conf_threshold confidence threashold for
-   * postprocessing, default is 0.25 \param[in] nms_iou_threshold iou threashold
+   * structure \param[in] conf_threshold confidence threshold for
+   * postprocessing, default is 0.25 \param[in] nms_iou_threshold iou threshold
    * for NMS, default is 0.5 \return true if the prediction successed, otherwise
    * false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5/yolov5.h

@@ -47,7 +47,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.25 \param[in] nms_threshold iou
+   * threshold for postprocessing, default is 0.25 \param[in] nms_threshold iou
    * threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov5lite.h

@@ -46,7 +46,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.45 \param[in] nms_iou_threshold
+   * threshold for postprocessing, default is 0.45 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.25 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov6.h

@@ -49,7 +49,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
+   * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolov7/yolov7.h

@@ -47,7 +47,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.25 \param[in] nms_threshold iou
+   * threshold for postprocessing, default is 0.25 \param[in] nms_threshold iou
    * threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/contrib/yolox.h

@@ -46,7 +46,7 @@ public:
    * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
    * with layout HWC, BGR format \param[in] result The output detection result
    * will be written to this structure \param[in] conf_threshold confidence
-   * threashold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
+   * threshold for postprocessing, default is 0.25 \param[in] nms_iou_threshold
    * iou threshold for NMS, default is 0.5 \return true if the prediction
    * successed, otherwise false
    */

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/retinaface.h

@@ -50,7 +50,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
-   * nms_iou_threshold iou threashold for NMS, default is 0.4 \return true if
+   * nms_iou_threshold iou threshold for NMS, default is 0.4 \return true if
    * the prediction successed, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/scrfd.h

@@ -47,7 +47,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
-   * nms_iou_threshold iou threashold for NMS, default is 0.4 \return true if
+   * nms_iou_threshold iou threshold for NMS, default is 0.4 \return true if
    * the prediction successed, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/ultraface.h

@@ -49,7 +49,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.7 \param[in]
-   * nms_iou_threshold iou threashold for NMS, default is 0.3 \return true if
+   * nms_iou_threshold iou threshold for NMS, default is 0.3 \return true if
    * the prediction successed, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/contrib/yolov5face.h

@@ -47,7 +47,7 @@ public:
    * with layout HWC, BGR format \param[in] result The output face detection
    * result will be written to this structure \param[in] conf_threshold
    * confidence threshold for postprocessing, default is 0.25 \param[in]
-   * nms_iou_threshold iou threashold for NMS, default is 0.5 \return true if
+   * nms_iou_threshold iou threshold for NMS, default is 0.5 \return true if
    * the prediction successed, otherwise false
    */
   virtual bool Predict(cv::Mat *im, FaceDetectionResult *result,

+ 1 - 1
paddlex/inference/common/result/mixin.py

@@ -614,7 +614,7 @@ class MarkdownMixin:
         Convert the result to markdown format.
 
         Args:
-            pretty (Optional[bool]): wheather to pretty markdown by HTML, default by True.
+            pretty (Optional[bool]): whether to pretty markdown by HTML, default by True.
 
         Returns:
             Dict[str, Union[str, Dict[str, Any]]]: A dictionary containing markdown text and image data.

+ 2 - 2
paddlex/inference/models/common/tokenizer/tokenizer_utils.py

@@ -323,12 +323,12 @@ class InitTrackerMeta(type):
 
         @functools.wraps(init_func)
         def __impl__(self, *args, **kwargs):
-            # registed helper by `pre_init_func`
+            # registered helper by `pre_init_func`
             if pre_init_func:
                 pre_init_func(self, init_func, *args, **kwargs)
             # keep full configuration
             init_func(self, *args, **kwargs)
-            # registed helper by `post_init_func`
+            # registered helper by `post_init_func`
             if post_init_func:
                 post_init_func(self, init_func, *args, **kwargs)
             self.init_config = kwargs

+ 1 - 1
paddlex/inference/models/text_detection/processors.py

@@ -234,7 +234,7 @@ class DetResizeForTest:
 @benchmark.timeit
 @class_requires_deps("opencv-contrib-python")
 class NormalizeImage:
-    """normalize image such as substract mean, divide std"""
+    """normalize image such as subtract mean, divide std"""
 
     def __init__(self, scale=None, mean=None, std=None, order="chw"):
         super().__init__()

+ 1 - 1
paddlex/inference/pipelines/components/faisser.py

@@ -178,7 +178,7 @@ class FaissBuilder:
 
     @classmethod
     def _get_index_type(cls, metric_type, index_type, num=None):
-        # if IVF method, cal ivf number automaticlly
+        # if IVF method, cal ivf number automatically
         if index_type == "IVF":
             index_type = index_type + str(min(int(num // 8), 65536))
             if metric_type in cls.BINARY_METRIC_TYPE:

+ 2 - 2
paddlex/inference/pipelines/layout_parsing/pipeline_v2.py

@@ -1007,8 +1007,8 @@ class _LayoutParsingPipelineV2(BasePipeline):
             seal_det_box_thresh (Optional[float]): Threshold for seal detection boxes.
             seal_det_unclip_ratio (Optional[float]): Ratio for unclipping seal detection boxes.
             seal_rec_score_thresh (Optional[float]): Score threshold for seal recognition.
-            use_wired_table_cells_trans_to_html (bool): Whether to use wired tabel cells trans to HTML.
-            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless tabel cells trans to HTML.
+            use_wired_table_cells_trans_to_html (bool): Whether to use wired table cells trans to HTML.
+            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless table cells trans to HTML.
             use_table_orientation_classify (bool): Whether to use table orientation classification.
             use_ocr_results_with_table_cells (bool): Whether to use OCR results processed by table cells.
             use_e2e_wired_table_rec_model (bool): Whether to use end-to-end wired table recognition model.

+ 1 - 1
paddlex/inference/pipelines/layout_parsing/result_v2.py

@@ -404,7 +404,7 @@ class LayoutParsingResultV2(BaseCVResult, HtmlMixin, XlsxMixin, MarkdownMixin):
         Save the parsing result to a Markdown file.
 
         Args:
-            pretty (Optional[bool]): wheather to pretty markdown by HTML, default by True.
+            pretty (Optional[bool]): whether to pretty markdown by HTML, default by True.
 
         Returns:
             Dict

+ 2 - 2
paddlex/inference/pipelines/layout_parsing/xycut_enhanced/utils.py

@@ -462,11 +462,11 @@ def sort_child_blocks(blocks, direction="horizontal") -> List[LayoutParsingBlock
     return blocks
 
 
-def _get_weights(label, dircetion="horizontal"):
+def _get_weights(label, direction="horizontal"):
     """Define weights based on the label and direction."""
     if label == "doc_title":
         return (
-            [1, 0.1, 0.1, 1] if dircetion == "horizontal" else [0.2, 0.1, 1, 1]
+            [1, 0.1, 0.1, 1] if direction == "horizontal" else [0.2, 0.1, 1, 1]
         )  # left-down ,  right-left
     elif label in [
         "paragraph_title",

+ 1 - 1
paddlex/inference/utils/pp_option.py

@@ -214,7 +214,7 @@ class PaddlePredictorOption(object):
         """set trt config"""
         assert isinstance(
             config, dict
-        ), f"The trt_cfg_setting must be `dict` type, but recived `{type(config)}` type!"
+        ), f"The trt_cfg_setting must be `dict` type, but received `{type(config)}` type!"
         self._update("trt_cfg_setting", config)
 
     @property

+ 4 - 4
paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py

@@ -267,8 +267,8 @@ def voc_get_label_anno(root_dir, anno_path):
     Read VOC format annotation file.
 
     Args:
-        root_dir (str): The directoty of VOC annotation file.
-        anno_path (str): The annoation file path.
+        root_dir (str): The directory of VOC annotation file.
+        anno_path (str): The annotation file path.
 
     Returns:
         tuple: A tuple of two elements, the first of which is of type dict, representing the mapping between tag names
@@ -295,7 +295,7 @@ def voc_get_label_anno(root_dir, anno_path):
 
 def voc_get_image_info(annotation_root, img_indexer):
     """
-    Get the iamge info from VOC annotation file.
+    Get the image info from VOC annotation file.
 
     Args:
         annotation_root: The annotation root.
@@ -425,7 +425,7 @@ def voc_xmls_to_cocojson(
         output_json_dict["images"].append(img_info)
 
         for obj in ann_root.findall("object"):
-            if obj.find("bndbox") is None:  # Skip the ojbect wihtout bndbox
+            if obj.find("bndbox") is None:  # Skip the object without bndbox
                 continue
             ann = voc_get_coco_annotation(obj=obj, label_indexer=label_indexer)
             ann.update({"image_id": img_info["id"], "id": bnd_id})

+ 1 - 1
paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py

@@ -55,7 +55,7 @@ def split_dataset(root_dir, train_rate, val_rate, group_id="group_id"):
     df = df.drop_duplicates(keep="first")
 
     group_unique = df[group_id].unique()
-    dfs = []  # seperate multiple group
+    dfs = []  # separate multiple group
     for column in group_unique:
         df_one = df[df[group_id].isin([column])]
         df_one = df_one.drop_duplicates(subset=["time"], keep="first")

+ 2 - 2
paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py

@@ -54,7 +54,7 @@ class BEVFusionModel(BaseModel):
             raise ValueError(f"`dy2st`={dy2st} is not supported.")
         if device in ("cpu", "gpu"):
             logging.warning(
-                f"The device type to use will be automatically determined, which may differ from the sepcified type: {repr(device)}."
+                f"The device type to use will be automatically determined, which may differ from the specified type: {repr(device)}."
             )
 
         # Update YAML config file
@@ -134,7 +134,7 @@ class BEVFusionModel(BaseModel):
 
         if device in ("cpu", "gpu"):
             logging.warning(
-                f"The device type to use will be automatically determined, which may differ from the sepcified type: {repr(device)}."
+                f"The device type to use will be automatically determined, which may differ from the specified type: {repr(device)}."
             )
 
         # Update YAML config file

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/cls/config.py

@@ -464,7 +464,7 @@ indicating that no pretrained model to be used."
         """update directory that save predicting output
 
         Args:
-            save_dir (str): the dicrectory path that save predicting output.
+            save_dir (str): the directory path that save predicting output.
         """
         self.update([f"Infer.save_dir={save_dir}"])
 

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/cls/model.py

@@ -265,7 +265,7 @@ class ClsModel(BaseModel):
             dict_path (str, optional): the label dict file path. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         model_dir = abspath(model_dir)
         input_path = abspath(input_path)

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/cls/runner.py

@@ -135,7 +135,7 @@ class ClsRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cmd = [self.python, "python/predict_cls.py", "-c", config_path, *cli_args]

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_base.yaml

@@ -79,7 +79,7 @@ DataLoader:
       name: MultiScaleSampler
       scales: [160, 192, 224, 288, 320]
       # first_bs: batch size for the first image resolution in the scales list
-      # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple
+      # divide_factor: to ensure the width and height dimensions can be divided by downsampling multiple
       first_bs: 500
       divided_factor: 32
       is_training: True

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_large.yaml

@@ -79,7 +79,7 @@ DataLoader:
       name: MultiScaleSampler
       scales: [160, 192, 224, 288, 320]
       # first_bs: batch size for the first image resolution in the scales list
-      # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple
+      # divide_factor: to ensure the width and height dimensions can be divided by downsampling multiple
       first_bs: 250
       divided_factor: 32
       is_training: True

+ 1 - 1
paddlex/repo_apis/PaddleClas_api/configs/PP-LCNetV2_small.yaml

@@ -79,7 +79,7 @@ DataLoader:
       name: MultiScaleSampler
       scales: [160, 192, 224, 288, 320]
       # first_bs: batch size for the first image resolution in the scales list
-      # divide_factor: to ensure the width and height dimensions can be devided by downsampling multiple
+      # divide_factor: to ensure the width and height dimensions can be divided by the downsampling multiple
       first_bs: 500
       divided_factor: 32
       is_training: True

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py

@@ -310,7 +310,7 @@ class InstanceSegModel(BaseModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         model_dir = abspath(model_dir)
         input_path = abspath(input_path)

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py

@@ -138,7 +138,7 @@ class InstanceSegRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cmd = [self.python, "deploy/python/infer.py", "--use_fd_format", *cli_args]

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/object_det/config.py

@@ -423,7 +423,7 @@ class DetConfig(BaseConfig, PPDetConfigMixin):
 
         Args:
             config (dict): the original config.
-            update_dict (dict): to be updated paramenters and its values
+            update_dict (dict): the parameters to be updated and their values
 
         Example:
             self._recursively_set(self.HybridEncoder, {'encoder_layer': {'dim_feedforward': 2048}})
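The `_recursively_set` example above hints at a recursive merge. A self-contained sketch of the pattern (our illustration, not PaddleDetection's actual implementation):

    def recursively_set(config: dict, update_dict: dict) -> None:
        """Merge update_dict into config in place, descending into nested
        dicts instead of overwriting them wholesale."""
        for key, value in update_dict.items():
            if isinstance(value, dict) and isinstance(config.get(key), dict):
                recursively_set(config[key], value)  # recurse into sub-dicts
            else:
                config[key] = value

    cfg = {"encoder_layer": {"dim_feedforward": 1024, "nhead": 8}}
    recursively_set(cfg, {"encoder_layer": {"dim_feedforward": 2048}})
    print(cfg)  # {'encoder_layer': {'dim_feedforward': 2048, 'nhead': 8}}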

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/object_det/model.py

@@ -335,7 +335,7 @@ class DetModel(BaseModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         model_dir = abspath(model_dir)
         input_path = abspath(input_path)

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/object_det/runner.py

@@ -138,7 +138,7 @@ class DetRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cmd = [self.python, "deploy/python/infer.py", "--use_fd_format", *cli_args]

+ 3 - 3
paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py

@@ -420,11 +420,11 @@ class FormulaRecConfig(BaseConfig):
         self._update_save_interval(save_interval)
 
     def _update_infer_img(self, infer_img: str, infer_list: str = None):
-        """update image list to be infered
+        """update image list to be inferred
 
         Args:
-            infer_img (str): path to the image file to be infered. It would be ignored when `infer_list` is be set.
-            infer_list (str, optional): path to the .txt file containing the paths to image to be infered.
+            infer_img (str): path to the image file to be inferred. It would be ignored when `infer_list` is set.
+            infer_list (str, optional): path to the .txt file containing the paths to images to be inferred.
                 Defaults to None.
         """
         if infer_list:
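The docstring above states that `infer_img` is ignored once `infer_list` is set. A minimal sketch of that precedence (the config keys here are illustrative, not the repo's actual ones):

    def update_infer_img(config: dict, infer_img: str, infer_list: str = None) -> None:
        """Prefer the .txt list of image paths when given; otherwise fall
        back to the single image path."""
        if infer_list:
            config["Infer.infer_list"] = infer_list  # the list takes precedence
        else:
            config["Infer.infer_img"] = infer_img  # single-image fallback

    cfg = {}
    update_infer_img(cfg, "demo.jpg", infer_list="imgs.txt")
    print(cfg)  # {'Infer.infer_list': 'imgs.txt'}; 'demo.jpg' is ignored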

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py

@@ -297,7 +297,7 @@ class FormulaRecModel(BaseModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         config = self.config.copy()
         cli_args = []

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py

@@ -131,7 +131,7 @@ class FormulaRecRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         cmd = [self.python, "tools/infer/predict_rec.py", *cli_args]
         return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/table_rec/model.py

@@ -85,7 +85,7 @@ class TableRecModel(TextRecModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         config = self.config.copy()
         cli_args = []

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py

@@ -45,7 +45,7 @@ class TableRecRunner(TextRecRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         cmd = [self.python, "ppstructure/table/predict_structure.py", *cli_args]
         return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/text_det/model.py

@@ -41,7 +41,7 @@ class TextDetModel(TextRecModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         config = self.config.copy()
         cli_args = []

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/text_det/runner.py

@@ -46,7 +46,7 @@ class TextDetRunner(TextRecRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `config_path` and `device` unused
         cmd = [self.python, "tools/infer/predict_det.py", *cli_args]

+ 3 - 3
paddlex/repo_apis/PaddleOCR_api/text_rec/config.py

@@ -413,11 +413,11 @@ class TextRecConfig(BaseConfig):
         self._update_save_interval(save_interval)
 
     def _update_infer_img(self, infer_img: str, infer_list: str = None):
-        """update image list to be infered
+        """update image list to be inferred
 
         Args:
-            infer_img (str): path to the image file to be infered. It would be ignored when `infer_list` is be set.
-            infer_list (str, optional): path to the .txt file containing the paths to image to be infered.
+            infer_img (str): path to the image file to be inferred. It would be ignored when `infer_list` is set.
+            infer_list (str, optional): path to the .txt file containing the paths to images to be inferred.
                 Defaults to None.
         """
         if infer_list:

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/text_rec/model.py

@@ -297,7 +297,7 @@ class TextRecModel(BaseModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         config = self.config.copy()
         cli_args = []

+ 1 - 1
paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py

@@ -131,7 +131,7 @@ class TextRecRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         cmd = [self.python, "tools/infer/predict_rec.py", *cli_args]
         return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

+ 1 - 1
paddlex/repo_apis/PaddleSeg_api/seg/model.py

@@ -388,7 +388,7 @@ class SegModel(BaseModel):
             save_dir (str, optional): the directory path to save output. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         config = self.config.copy()
         cli_args = []

+ 1 - 1
paddlex/repo_apis/PaddleSeg_api/seg/runner.py

@@ -151,7 +151,7 @@ class SegRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cli_args = self._gather_opts_args(cli_args)

+ 3 - 3
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml

@@ -50,7 +50,7 @@ PIPELINE: #PIPELINE field
             num_seg: 8
             seg_len: 1
             valid_mode: False
-        transform: #Mandotary, image transfrom operator
+        transform: #Mandatory, image transform operator
             - Scale:
                 short_size: 256
             - MultiScaleCrop:
@@ -62,7 +62,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    valid: #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    valid: #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"
@@ -80,7 +80,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    test:  #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    test:  #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"

+ 3 - 3
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml

@@ -49,7 +49,7 @@ PIPELINE: #PIPELINE field
             num_seg: 16
             seg_len: 1
             valid_mode: False
-        transform: #Mandotary, image transfrom operator
+        transform: #Mandatory, image transform operator
             - Scale:
                 short_size: 256
             - MultiScaleCrop:
@@ -61,7 +61,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    valid: #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    valid: #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"
@@ -79,7 +79,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    test:  #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    test:  #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"

+ 3 - 3
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml

@@ -46,7 +46,7 @@ PIPELINE: #PIPELINE field
             num_seg: 8
             seg_len: 1
             valid_mode: False
-        transform: #Mandotary, image transfrom operator
+        transform: #Mandatory, image transform operator
             - Scale:
                 short_size: 256
             - MultiScaleCrop:
@@ -58,7 +58,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    valid: #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    valid: #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"
@@ -76,7 +76,7 @@ PIPELINE: #PIPELINE field
             - Normalization:
                 mean: [0.485, 0.456, 0.406]
                 std: [0.229, 0.224, 0.225]
-    test:  #Mandatory, indicate the pipeline to deal with the validing data. associate to the 'paddlevideo/loader/pipelines/'
+    test:  #Mandatory, indicates the pipeline to deal with the validating data. Associated with 'paddlevideo/loader/pipelines/'
         decode:
             name: "VideoDecoder"
             backend: "decord"

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_cls/config.py

@@ -435,7 +435,7 @@ indicating that no pretrained model to be used."
         """update directory that save predicting output
 
         Args:
-            save_dir (str): the dicrectory path that save predicting output.
+            save_dir (str): the directory path to save predicting output.
         """
         self.update({"Infer.save_dir": save_dir})
 

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_cls/model.py

@@ -257,7 +257,7 @@ class VideoClsModel(BaseModel):
             dict_path (str, optional): the label dict file path. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         model_dir = abspath(model_dir)
         input_path = abspath(input_path)

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py

@@ -136,7 +136,7 @@ class VideoClsRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cmd = [self.python, "python/predict_cls.py", "-c", config_path, *cli_args]

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_det/config.py

@@ -436,7 +436,7 @@ indicating that no pretrained model to be used."
         """update directory that save predicting output
 
         Args:
-            save_dir (str): the dicrectory path that save predicting output.
+            save_dir (str): the directory path to save predicting output.
         """
         self.update({"Infer.save_dir": save_dir})
 

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_det/model.py

@@ -258,7 +258,7 @@ class VideoDetModel(BaseModel):
             dict_path (str, optional): the label dict file path. Defaults to None.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         model_dir = abspath(model_dir)
         input_path = abspath(input_path)

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/video_det/runner.py

@@ -136,7 +136,7 @@ class VideoDetRunner(BaseRunner):
             device (str): unused.
 
         Returns:
-            CompletedProcess: the result of infering subprocess execution.
+            CompletedProcess: the result of inferring subprocess execution.
         """
         # `device` unused
         cmd = [self.python, "python/predict_cls.py", "-c", config_path, *cli_args]

+ 1 - 1
paddlex/repo_apis/base/config.py

@@ -106,7 +106,7 @@ class BaseConfig(_Config, metaclass=abc.ABCMeta):
     """
     Abstract base class of Config.
 
-    Config provides the funtionality to load, parse, or dump to a configuration
+    Config provides the functionality to load, parse, or dump to a configuration
     file with a specific format. Also, it provides APIs to update configurations
     of several important hyperparameters and model components.
     """

+ 2 - 2
paddlex/repo_manager/core.py

@@ -101,7 +101,7 @@ def setup(
     """setup"""
     if update_repos and use_local_repos:
         logging.error(
-            f"The `--update_repos` and `--use_local_repos` should not be True at the same time. They are global setting for all repos. `--update_repos` means that update all repos to sync with remote, and `--use_local_repos` means that don't update when local repo is exsting."
+            f"The `--update_repos` and `--use_local_repos` should not be True at the same time. They are global setting for all repos. `--update_repos` means that update all repos to sync with remote, and `--use_local_repos` means that don't update when local repo is existing."
         )
         raise Exception()
 
@@ -136,7 +136,7 @@ def setup(
                 except EOFError:
                     logging.warning(
                         "Unable to read from stdin. Please set `--use_local_repos` to \
-                        True or False to apply a global setting for using exsting or re-getting repos."
+                        True or False to apply a global setting for using existing or re-getting repos."
                     )
                     raise
                 remove_existing = remove_existing.lower() in ("y", "yes")

+ 1 - 1
paddlex/repo_manager/repo.py

@@ -331,7 +331,7 @@ class RepositoryGroupInstaller(object):
 
     def _sort_repos(self, repos, check_missing=False):
         # We sort the repos to ensure that the dependencies precede the
-        # dependant in the list.
+        # dependent in the list.
         name_meta_pairs = []
         for repo in repos:
             name_meta_pairs.append((repo.name, repo.meta))
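Ordering repos so dependencies precede their dependents is a topological sort. A minimal sketch under the assumption that each repo's meta lists its dependency names (the `deps` field is hypothetical):

    def sort_repos(repos: dict) -> list:
        """Return repo names so every dependency comes before the repos
        that depend on it (depth-first topological sort; cycle detection
        omitted for brevity)."""
        ordered, seen = [], set()

        def visit(name: str) -> None:
            if name in seen:
                return
            seen.add(name)
            for dep in repos[name].get("deps", []):
                visit(dep)  # emit dependencies first
            ordered.append(name)

        for name in repos:
            visit(name)
        return ordered

    repos = {"PaddleOCR": {"deps": ["PaddleDetection"]}, "PaddleDetection": {"deps": []}}
    print(sort_repos(repos))  # ['PaddleDetection', 'PaddleOCR']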

+ 1 - 1
paddlex/utils/misc.py

@@ -70,7 +70,7 @@ class CachedProperty(object):
     https://github.com/pydanny/cached-property/blob/master/cached_property.py .
 
     Note that this implementation does NOT work in multi-thread or coroutine
-    senarios.
+    scenarios.
     """
 
     def __init__(self, func):
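The thread-safety caveat above follows from the descriptor's check-then-set pattern. A minimal sketch of such a cached property and where the race lives (our illustration of the pattern, not the exact paddlex code):

    class CachedProperty:
        """Compute once on first access, then serve from the instance dict.
        Two threads that enter __get__ before either stores the result will
        both run func -- hence the 'not thread-safe' note above."""

        def __init__(self, func):
            self.func = func

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            value = self.func(obj)  # may run twice under a race
            obj.__dict__[self.func.__name__] = value  # shadows the descriptor
            return value

    class Dataset:
        @CachedProperty
        def stats(self):
            print("computing...")
            return 42

    d = Dataset()
    d.stats  # prints "computing..." and caches 42
    d.stats  # served from d.__dict__; no recompute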