
Merge pull request #1767 from cuicheng01/develop

polish docs and fix bugs
cuicheng01 1 year ago
commit 0f6f80734c
100 changed files with 185 additions and 196 deletions
  1. README.md (+9, -7)
  2. docs/tutorials/INSTALL.md (+1, -3)
  3. docs/tutorials/inference/pipeline_inference_tools.md (+7, -7)
  4. paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml (+1, -1)
  5. paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml (+1, -1)
  6. paddlex/configs/image_classification/ConvNeXt_tiny.yaml (+1, -1)
  7. paddlex/configs/image_classification/MobileNetV2_x0_25.yaml (+1, -1)
  8. paddlex/configs/image_classification/MobileNetV2_x0_5.yaml (+1, -1)
  9. paddlex/configs/image_classification/MobileNetV2_x1_0.yaml (+1, -1)
  10. paddlex/configs/image_classification/MobileNetV2_x1_5.yaml (+1, -1)
  11. paddlex/configs/image_classification/MobileNetV2_x2_0.yaml (+1, -1)
  12. paddlex/configs/image_classification/MobileNetV3_large_x0_35.yaml (+1, -1)
  13. paddlex/configs/image_classification/MobileNetV3_large_x0_5.yaml (+1, -1)
  14. paddlex/configs/image_classification/MobileNetV3_large_x0_75.yaml (+1, -1)
  15. paddlex/configs/image_classification/MobileNetV3_large_x1_0.yaml (+1, -1)
  16. paddlex/configs/image_classification/MobileNetV3_large_x1_25.yaml (+1, -1)
  17. paddlex/configs/image_classification/MobileNetV3_small_x0_35.yaml (+1, -1)
  18. paddlex/configs/image_classification/MobileNetV3_small_x0_5.yaml (+1, -1)
  19. paddlex/configs/image_classification/MobileNetV3_small_x0_75.yaml (+1, -1)
  20. paddlex/configs/image_classification/MobileNetV3_small_x1_0.yaml (+1, -1)
  21. paddlex/configs/image_classification/MobileNetV3_small_x1_25.yaml (+1, -1)
  22. paddlex/configs/image_classification/PP-HGNetV2-B0.yaml (+1, -1)
  23. paddlex/configs/image_classification/PP-HGNetV2-B4.yaml (+1, -1)
  24. paddlex/configs/image_classification/PP-HGNetV2-B6.yaml (+1, -1)
  25. paddlex/configs/image_classification/PP-HGNet_small.yaml (+1, -1)
  26. paddlex/configs/image_classification/PP-LCNet_x0_25.yaml (+1, -1)
  27. paddlex/configs/image_classification/PP-LCNet_x0_35.yaml (+1, -1)
  28. paddlex/configs/image_classification/PP-LCNet_x0_5.yaml (+1, -1)
  29. paddlex/configs/image_classification/PP-LCNet_x0_75.yaml (+1, -1)
  30. paddlex/configs/image_classification/PP-LCNet_x1_0.yaml (+1, -1)
  31. paddlex/configs/image_classification/PP-LCNet_x1_5.yaml (+1, -1)
  32. paddlex/configs/image_classification/PP-LCNet_x2_0.yaml (+1, -1)
  33. paddlex/configs/image_classification/PP-LCNet_x2_5.yaml (+1, -1)
  34. paddlex/configs/image_classification/ResNet101.yaml (+1, -1)
  35. paddlex/configs/image_classification/ResNet152.yaml (+1, -1)
  36. paddlex/configs/image_classification/ResNet18.yaml (+1, -1)
  37. paddlex/configs/image_classification/ResNet34.yaml (+1, -1)
  38. paddlex/configs/image_classification/ResNet50.yaml (+1, -1)
  39. paddlex/configs/image_classification/SwinTransformer_base_patch4_window7_224.yaml (+1, -1)
  40. paddlex/configs/instance_segmentation/Mask-RT-DETR-H.yaml (+1, -1)
  41. paddlex/configs/instance_segmentation/Mask-RT-DETR-L.yaml (+1, -1)
  42. paddlex/configs/object_detection/PP-YOLOE_plus-L.yaml (+1, -1)
  43. paddlex/configs/object_detection/PP-YOLOE_plus-M.yaml (+1, -1)
  44. paddlex/configs/object_detection/PP-YOLOE_plus-S.yaml (+1, -1)
  45. paddlex/configs/object_detection/PP-YOLOE_plus-X.yaml (+1, -1)
  46. paddlex/configs/object_detection/PicoDet-L.yaml (+1, -1)
  47. paddlex/configs/object_detection/PicoDet-S.yaml (+1, -1)
  48. paddlex/configs/object_detection/RT-DETR-H.yaml (+1, -1)
  49. paddlex/configs/object_detection/RT-DETR-L.yaml (+1, -1)
  50. paddlex/configs/object_detection/RT-DETR-R18.yaml (+1, -1)
  51. paddlex/configs/object_detection/RT-DETR-R50.yaml (+1, -1)
  52. paddlex/configs/object_detection/RT-DETR-X.yaml (+1, -1)
  53. paddlex/configs/semantic_segmentation/Deeplabv3-R101.yaml (+1, -1)
  54. paddlex/configs/semantic_segmentation/Deeplabv3-R50.yaml (+1, -1)
  55. paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R101.yaml (+1, -1)
  56. paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R50.yaml (+1, -1)
  57. paddlex/configs/semantic_segmentation/OCRNet_HRNet-W48.yaml (+1, -1)
  58. paddlex/configs/semantic_segmentation/PP-LiteSeg-T.yaml (+1, -1)
  59. paddlex/configs/text_detection/PP-OCRv4_mobile_det.yaml (+1, -1)
  60. paddlex/configs/text_detection/PP-OCRv4_server_det.yaml (+1, -1)
  61. paddlex/configs/text_recognition/PP-OCRv4_mobile_rec.yaml (+1, -1)
  62. paddlex/configs/text_recognition/PP-OCRv4_server_rec.yaml (+1, -1)
  63. paddlex/engine.py (+2, -2)
  64. paddlex/modules/base/dataset_checker/dataset_checker.py (+4, -5)
  65. paddlex/modules/base/evaluator.py (+7, -12)
  66. paddlex/modules/base/predictor/predictor.py (+5, -5)
  67. paddlex/modules/base/predictor/utils/official_models.py (+1, -1)
  68. paddlex/modules/base/trainer/train_deamon.py (+4, -5)
  69. paddlex/modules/base/trainer/trainer.py (+6, -10)
  70. paddlex/modules/image_classification/dataset_checker/__init__.py (+4, -4)
  71. paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py (+2, -2)
  72. paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py (+4, -5)
  73. paddlex/modules/image_classification/evaluator.py (+2, -2)
  74. paddlex/modules/image_classification/model_list.py (+1, -1)
  75. paddlex/modules/image_classification/predictor/predictor.py (+3, -3)
  76. paddlex/modules/image_classification/predictor/transforms.py (+1, -1)
  77. paddlex/modules/image_classification/predictor/utils.py (+5, -2)
  78. paddlex/modules/image_classification/trainer.py (+2, -2)
  79. paddlex/modules/instance_segmentation/dataset_checker/__init__.py (+4, -4)
  80. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py (+2, -2)
  81. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py (+3, -3)
  82. paddlex/modules/instance_segmentation/evaluator.py (+2, -2)
  83. paddlex/modules/instance_segmentation/model_list.py (+1, -1)
  84. paddlex/modules/instance_segmentation/predictor/predictor.py (+2, -2)
  85. paddlex/modules/instance_segmentation/trainer.py (+2, -2)
  86. paddlex/modules/object_detection/dataset_checker/__init__.py (+4, -4)
  87. paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py (+2, -2)
  88. paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py (+3, -3)
  89. paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py (+4, -4)
  90. paddlex/modules/object_detection/evaluator.py (+2, -2)
  91. paddlex/modules/object_detection/model_list.py (+1, -1)
  92. paddlex/modules/object_detection/predictor/predictor.py (+3, -3)
  93. paddlex/modules/object_detection/trainer.py (+2, -2)
  94. paddlex/modules/semantic_segmentation/dataset_checker/__init__.py (+4, -4)
  95. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py (+2, -2)
  96. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py (+3, -4)
  97. paddlex/modules/semantic_segmentation/evaluator.py (+2, -2)
  98. paddlex/modules/semantic_segmentation/model_list.py (+1, -1)
  99. paddlex/modules/semantic_segmentation/predictor/predictor.py (+5, -6)
  100. paddlex/modules/semantic_segmentation/trainer.py (+2, -2)

+ 9 - 7
README.md

@@ -24,8 +24,8 @@ PaddleX 3.0 is split into a local edition and a cloud edition; the local edition provides a unified task API and supports…
 ## Installation and Quick Start
 - [Installation](./docs/tutorials/INSTALL.md)
 - Quick start
-  - [Quick start with the single-model development tool](./docs/tutorials/QUCK_STARTED.md)
-  - Quick start with the model pipeline development tool (comming soon)
+  - [Single-model development tool](./docs/tutorials/tools/model_tools.md)
+  - [Model pipeline development tool](./docs/tutorials/tools/pipelines_tools.md)

 ## Single-Model Development Tool
 This section introduces the end-to-end development workflow for a single PaddleX 3.0 model, covering data preparation, model training/evaluation, and model inference. For the models supported by PaddleX 3.0, see the [PaddleX model zoo](./docs/tutorials/models/support_model_list.md).
@@ -35,17 +35,19 @@ PaddleX 3.0 is split into a local edition and a cloud edition; the local edition provides a unified task API and supports…
 - [Data annotation](./docs/tutorials/data/annotation/README.md)
 - [Dataset validation](./docs/tutorials/data/dataset_check.md)
 ### 2. Model Training
-- [Model training/evaluation](./docs/tutorials/train/README.md)
-- Model optimization (comming soon)
+- [Model training/evaluation](./docs/tutorials/base/README.md)
+- [Model optimization](./docs/tutorials/base/model_optimize.md)

 ### 3. Model Inference
-- Model inference capabilities will be open-sourced soon, stay tuned...
+ - [Model inference](docs/tutorials/inference/model_inference_tools.md)
+ - [Model inference API introduction](docs/tutorials/inference/model_infernce_api.md)

 ## Model Pipeline Development Tool
-This section will introduce the end-to-end development workflow for PaddleX 3.0 model pipelines; this content will be open-sourced soon, stay tuned...
+ - [Model pipeline inference](docs/tutorials/inference/pipeline_inference_tools.md)
+ - [Model pipeline inference API introduction](docs/tutorials/inference/pipeline_infernce_api.md)

 ## Multi-Hardware Support
-🔥 This project supports model development on a variety of hardware. Besides GPUs, the currently supported hardware also includes **Kunlunxin**, **Ascend**, and **Cambricon** chips. Just add one device-configuration parameter to use the tools above on the corresponding hardware. For details, see [multi-device training](./docs/tutorials/train/multi_device_train.md).
+🔥 This project supports model development on a variety of hardware. Besides GPUs, the currently supported hardware also includes **Kunlunxin**, **Ascend**, and **Cambricon** chips. Just add one device-configuration parameter to use the tools above on the corresponding hardware. For details, refer to the documents above.

 - For the list of models supported on Ascend chips, see the [PaddleX Ascend model list](./docs/tutorials/models/support_npu_model_list.md).
 - For the list of models supported on Kunlunxin chips, see the [PaddleX Kunlunxin model list](./docs/tutorials/models/support_xpu_model_list.md).

+ 1 - 3
docs/tutorials/INSTALL.md

@@ -88,11 +88,9 @@ git clone https://gitee.com/paddlepaddle/PaddleX.git
 <!-- Need to specify here what a successful installation looks like, Tingquan -->
 ```bash
 cd PaddleX
-# Install third-party dependencies
-pip install -r requirements.txt

 # Install the PaddleX whl
-# -e: install in editable mode; code changes in the current project take effect in the PaddleX Wheel
+# -e: install in editable mode; code changes in the current project take effect directly in the installed PaddleX Wheel
 pip install -e .

 # Install PaddleX-related dependencies

+ 7 - 7
docs/tutorials/inference/pipeline_inference_tools.md

@@ -38,7 +38,7 @@ result = pipeline.predict(
         {'input_path': "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"}
     )
 print(result["cls_result"])
-```    
+```  
 </details>
 
 ### 2.2 目标检测产线
@@ -71,8 +71,8 @@ from paddlex import PaddleInferenceOption
 model_name =  "RT-DETR-L"
 output_base = Path("output")
 
-output_dir = output_base / model_name
-pipeline = DetPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = DetPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
         {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"})
 print(result["boxes"])
@@ -111,8 +111,8 @@ from paddlex import PaddleInferenceOption
 
 model_name = "PP-LiteSeg-T",
 output_base = Path("output")
-output_dir = output_base / model_name
-pipeline = SegPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = SegPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"}
 )
@@ -152,8 +152,8 @@ from paddlex import PaddleInferenceOption
 model_name =  "Mask-RT-DETR-L"
 output_base = Path("output")
 
-output_dir = output_base / model_name
-pipeline = DetPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = DetPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"})
 print(result["boxes"])
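
The hunks above rename the pipeline constructor keyword from `output_dir` to `output`. A minimal sketch of the updated call pattern, assuming `DetPipeline` is importable as shown below (the import path is not visible in this diff and is an assumption):

```python
# Sketch of the renamed pipeline API; the DetPipeline import location is assumed.
from pathlib import Path

from paddlex import PaddleInferenceOption
from paddlex.pipelines import DetPipeline  # assumed import path

model_name = "RT-DETR-L"
output = Path("output") / model_name  # formerly passed as output_dir=...

pipeline = DetPipeline(model_name, output=output,
                       kernel_option=PaddleInferenceOption())
result = pipeline.predict({
    "input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
})
print(result["boxes"])
```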

+ 1 - 1
paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ConvNeXt_tiny.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x0_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x1_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x2_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x1_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x1_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B4.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B6.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNet_small.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x1_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x2_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x2_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet101.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet152.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet18.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet34.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet50.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/SwinTransformer_base_patch4_window7_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/instance_segmentation/Mask-RT-DETR-H.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/instance_seg/instance_seg_coco_examples/images/aircraft-women-fashion-pilot-48797.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/instance_segmentation/Mask-RT-DETR-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/instance_seg/instance_seg_coco_examples/images/aircraft-women-fashion-pilot-48797.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-M.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-S.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-X.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PicoDet-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PicoDet-S.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-H.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-R18.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-X.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3-R101.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R101.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/OCRNet_HRNet-W48.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/PP-LiteSeg-T.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/text_detection/PP-OCRv4_mobile_det.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_det/ocr_det_dataset_examples/images/train_img_100.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_001.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/text_detection/PP-OCRv4_server_det.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_det/ocr_det_dataset_examples/images/train_img_100.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_001.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/text_recognition/PP-OCRv4_mobile_rec.yaml

@@ -30,7 +30,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_rec/ocr_rec_dataset_examples/images/train_word_1003.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_rec_001.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 1 - 1
paddlex/configs/text_recognition/PP-OCRv4_server_rec.yaml

@@ -30,7 +30,7 @@ Evaluate:
 
 Predict:
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_rec/ocr_rec_dataset_examples/images/train_word_1003.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_rec_001.png"
   kernel_option:
     run_mode: paddle
     batch_size: 1

+ 2 - 2
paddlex/engine.py

@@ -29,14 +29,14 @@ class Engine(object):
         self.config = config.get_config(
             args.config, overrides=args.override, show=False)
         self.mode = self.config.Global.mode
-        self.output_dir = self.config.Global.output
+        self.output = self.config.Global.output
 
     @try_except_decorator
     def run(self):
         """ the main function """
         if self.config.Global.mode == "check_dataset":
             dataset_checker = build_dataset_checker(self.config)
-            return dataset_checker.check_dataset()
+            return dataset_checker.check()
         elif self.config.Global.mode == "train":
             trainer = build_trainer(self.config)
             trainer.train()

+ 4 - 5
paddlex/modules/base/dataset_checker/dataset_checker.py

@@ -49,10 +49,9 @@ class BaseDatasetChecker(ABC, metaclass=AutoRegisterABCMetaClass):
         super().__init__()
         self.global_config = config.Global
         self.check_dataset_config = config.CheckDataset
-        self.output_dir = os.path.join(self.global_config.output,
-                                       "check_dataset")
+        self.output = os.path.join(self.global_config.output, "check_dataset")
 
-    def check_dataset(self) -> dict:
+    def check(self) -> dict:
         """execute dataset checking
 
         Returns:
@@ -60,8 +59,8 @@ class BaseDatasetChecker(ABC, metaclass=AutoRegisterABCMetaClass):
         """
         dataset_dir = self.get_dataset_root(self.global_config.dataset_dir)
 
-        if not os.path.exists(self.output_dir):
-            os.makedirs(self.output_dir)
+        if not os.path.exists(self.output):
+            os.makedirs(self.output)
 
         if self.check_dataset_config.get("convert", None):
             if self.check_dataset_config.convert.get("enable", False):
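
Together with the `engine.py` change above, the checker's public entry point is renamed from `check_dataset()` to `check()`, and the attribute holding the report directory becomes `self.output`. A hedged caller-side sketch; the import locations and the config helper are assumptions that mirror how `engine.py` builds its config:

```python
# Hypothetical caller; import paths are assumed for illustration only.
from paddlex.utils import config as px_config        # assumed: the `config` module used by engine.py
from paddlex.modules import build_dataset_checker    # assumed export of the factory seen in engine.py

cfg = px_config.get_config(
    "paddlex/configs/image_classification/PP-LCNet_x1_0.yaml", overrides=None, show=False)
checker = build_dataset_checker(cfg)
summary = checker.check()   # renamed from checker.check_dataset()
# check artifacts (demo_img/, histogram.png, ...) are written under <Global.output>/check_dataset
print(summary)
```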

+ 7 - 12
paddlex/modules/base/evaluator.py

@@ -100,7 +100,13 @@ class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
         Returns:
             dict: the evaluation metrics
         """
-        metrics = self.eval()
+        self.update_config()
+        # self.dump_config()
+        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
+        assert evaluate_result.returncode == 0, f"Encountered an unexpected error({evaluate_result.returncode}) in \
+evaling!"
+
+        metrics = evaluate_result.metrics
         assert self.check_return(
             metrics
         ), f"The return value({metrics}) of Evaluator.eval() is illegal!"
@@ -118,17 +124,6 @@ class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
                                             "config.yaml")
         self.pdx_config.dump(config_file_path)
 
-    def eval(self):
-        """firstly, update evaluation config, then evaluate model, finally return the evaluation result
-        """
-        self.update_config()
-        # self.dump_config()
-        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
-        assert evaluate_result.returncode == 0, f"Encountered an unexpected error({evaluate_result.returncode}) in \
-evaling!"
-
-        return evaluate_result.metrics
-
     def get_device(self, using_device_number: int=None) -> str:
         """get device setting from config
 

+ 5 - 5
paddlex/modules/base/predictor/predictor.py

@@ -38,13 +38,13 @@ class BasePredictor(ABC, FromDictMixin, Node):
     def __init__(self,
                  model_dir,
                  kernel_option,
-                 output_dir,
+                 output,
                  pre_transforms=None,
                  post_transforms=None):
         super().__init__()
         self.model_dir = model_dir
         self.kernel_option = kernel_option
-        self.output_dir = output_dir
+        self.output = output
         self.other_src = self.load_other_src()
 
         logging.debug(
@@ -171,7 +171,7 @@ class PredictorBuilderByConfig(object):
         self.predictor = BasePredictor.get(model_name)(
             model_dir=model_dir,
             kernel_option=kernel_option,
-            output_dir=config.Global.output_dir,
+            output=config.Global.output,
             **predict_config)
 
     def predict(self):
@@ -189,7 +189,7 @@ def build_predictor(*args, **kwargs):
 def create_model(model_name,
                  model_dir=None,
                  kernel_option=None,
-                 output_dir=None,
+                 output="./",
                  pre_transforms=None,
                  post_transforms=None,
                  *args,
@@ -206,7 +206,7 @@ def create_model(model_name,
             BasePredictor.get(model_name)
     return BasePredictor.get(model_name)(model_dir=model_dir,
                                          kernel_option=kernel_option,
-                                         output_dir=output_dir,
+                                         output=output,
                                          pre_transforms=pre_transforms,
                                          post_transforms=post_transforms,
                                          *args,
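
In the same spirit, `create_model()` now takes `output` (defaulting to "./") instead of `output_dir`. A short construction sketch; importing `create_model` from the top-level `paddlex` package is an assumption (only `PaddleInferenceOption` is confirmed by the docs above):

```python
# Sketch of the renamed create_model keyword; the create_model import path is assumed.
from paddlex import PaddleInferenceOption, create_model

clas_model = create_model(
    "PP-LCNet_x1_0",                       # any name listed in the model_list.py files below
    model_dir=None,                        # None presumably falls back to the official weights cache
    kernel_option=PaddleInferenceOption(),
    output="output/PP-LCNet_x1_0",         # formerly output_dir=...; defaults to "./"
)
```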

+ 1 - 1
paddlex/modules/base/predictor/utils/official_models.py

@@ -171,7 +171,7 @@ class OfficialModelsDict(dict):
     def __getitem__(self, key):
         url = super().__getitem__(key)
         save_dir = Path(CACHE_DIR) / "official_models"
-        download_and_extract(url, save_dir, f"{key}", overwrite=True)
+        download_and_extract(url, save_dir, f"{key}", overwrite=False)
         return save_dir / f"{key}"
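
Switching `overwrite` to `False` makes the official-weights cache idempotent: a model that was already downloaded and extracted is reused instead of being fetched again. A generic illustration of that download-once pattern (not PaddleX's own `download_and_extract` helper):

```python
# Generic download-once sketch; the function and paths are illustrative, not PaddleX internals.
import shutil
import urllib.request
from pathlib import Path

def fetch_once(url: str, save_dir: Path, name: str) -> Path:
    """Download and extract an archive unless the extracted copy already exists."""
    target = save_dir / name
    if target.exists():                      # overwrite=False semantics: reuse the cache
        return target
    save_dir.mkdir(parents=True, exist_ok=True)
    archive = save_dir / Path(url).name
    urllib.request.urlretrieve(url, archive)
    shutil.unpack_archive(str(archive), str(save_dir))
    return target
```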
 
 

+ 4 - 5
paddlex/modules/base/trainer/train_deamon.py

@@ -51,7 +51,7 @@ class BaseTrainDeamon(ABC):
         """ init """
         self.global_config = global_config
         self.init_pre_hook()
-        self.output_dir = global_config.output
+        self.output = global_config.output
         self.train_outputs = self.get_train_outputs()
         self.save_paths = self.get_save_paths()
         self.results = self.init_train_result()
@@ -90,7 +90,7 @@ class BaseTrainDeamon(ABC):
 
     def get_train_outputs(self):
         """ get training outputs dir """
-        return [Path(self.output_dir)]
+        return [Path(self.output)]
 
     def init_model_names(self):
         """ get models name """
@@ -99,8 +99,7 @@ class BaseTrainDeamon(ABC):
     def get_save_paths(self):
         """ get the path to save train_result.json """
         return [
-            Path(self.output_dir, save_name)
-            for save_name in self.get_save_names()
+            Path(self.output, save_name) for save_name in self.get_save_names()
         ]
 
     def init_configs(self):
@@ -125,7 +124,7 @@ class BaseTrainDeamon(ABC):
         return model_pkg
 
     def normlize_path(self, dict_obj, relative_to):
-        """ normlize path to string type path relative to the output_dir """
+        """ normlize path to string type path relative to the output """
         for key in dict_obj:
             if isinstance(dict_obj[key], dict):
                 self.normlize_path(dict_obj[key], relative_to)

+ 6 - 10
paddlex/modules/base/trainer/trainer.py

@@ -56,7 +56,12 @@ class BaseTrainer(ABC, metaclass=AutoRegisterABCMetaClass):
         """execute model training
         """
         os.makedirs(self.global_config.output, exist_ok=True)
-        self.train(*args, **kwargs)
+        self.update_config()
+        self.dump_config()
+        train_result = self.pdx_model.train(**self.get_train_kwargs())
+        assert train_result.returncode == 0, f"Encountered an unexpected error({train_result.returncode}) in \
+training!"
+
         self.deamon.stop()
 
     def dump_config(self, config_file_path: str=None):
@@ -71,15 +76,6 @@ class BaseTrainer(ABC, metaclass=AutoRegisterABCMetaClass):
                                             "config.yaml")
         self.pdx_config.dump(config_file_path)
 
-    def train(self):
-        """firstly, update and dump train config, then train model
-        """
-        self.update_config()
-        self.dump_config()
-        train_result = self.pdx_model.train(**self.get_train_kwargs())
-        assert train_result.returncode == 0, f"Encountered an unexpected error({train_result.returncode}) in \
-training!"
-
     def get_device(self, using_device_number: int=None) -> str:
         """get device setting from config
 

+ 4 - 4
paddlex/modules/image_classification/dataset_checker/__init__.py

@@ -17,13 +17,13 @@ from pathlib import Path
 
 from ...base import BaseDatasetChecker
 from .dataset_src import check, split_dataset, deep_analyse
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class ClsDatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Image Classification Model
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
 
     def get_dataset_root(self, dataset_dir: str) -> str:
@@ -74,7 +74,7 @@ class ClsDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: dataset summary.
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
@@ -85,7 +85,7 @@ class ClsDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: the deep analysis results.
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
     def get_show_type(self) -> str:
         """get the show type of dataset

+ 2 - 2
paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py

@@ -31,7 +31,7 @@ from .....utils.file_interface import custom_open
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
-def deep_analyse(dataset_path, output_dir):
+def deep_analyse(dataset_path, output):
     """class analysis for dataset"""
     tags = ['train', 'val']
     labels_cnt = defaultdict(str)
@@ -90,7 +90,7 @@ def deep_analyse(dataset_path, output_dir):
         fontsize=12)
     plt.legend(loc=1)
     fig.tight_layout()
-    file_path = os.path.join(output_dir, "histogram.png")
+    file_path = os.path.join(output, "histogram.png")
     fig.savefig(file_path, dpi=300)
 
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 4 - 5
paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py

@@ -24,7 +24,7 @@ from .....utils.errors import DatasetFileNotFoundError, CheckFailedError
 from .utils.visualizer import draw_label
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     # Custom dataset
@@ -95,7 +95,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                     if not osp.exists(img_path):
                         raise DatasetFileNotFoundError(file_path=img_path)
 
-                    vis_save_dir = osp.join(output_dir, 'demo_img')
+                    vis_save_dir = osp.join(output, 'demo_img')
                     if not osp.exists(vis_save_dir):
                         os.makedirs(vis_save_dir)
 
@@ -107,8 +107,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                                             osp.basename(file_name))
                         vis_im.save(vis_path)
                         sample_path = osp.join(
-                            'check_dataset',
-                            os.path.relpath(vis_path, output_dir))
+                            'check_dataset', os.path.relpath(vis_path, output))
                         sample_paths[tag].append(sample_path)
 
                     try:
@@ -121,7 +120,7 @@ def check(dataset_dir, output_dir, sample_num=10):
     num_classes = max(labels) + 1
 
     attrs = {}
-    attrs['label_file'] = osp.relpath(label_file, output_dir)
+    attrs['label_file'] = osp.relpath(label_file, output)
     attrs['num_classes'] = num_classes
     attrs['train_samples'] = sample_cnts['train']
     attrs['train_sample_paths'] = sample_paths['train']

+ 2 - 2
paddlex/modules/image_classification/evaluator.py

@@ -14,12 +14,12 @@
 
 
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class ClsEvaluator(BaseEvaluator):
     """ Image Classification Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def update_config(self):
         """update evalution config

+ 1 - 1
paddlex/modules/image_classification/support_models.py → paddlex/modules/image_classification/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'CLIP_vit_base_patch16_224',
     'CLIP_vit_large_patch14_224',
     'ConvNeXt_tiny',

+ 3 - 3
paddlex/modules/image_classification/predictor/predictor.py

@@ -24,12 +24,12 @@ from .keys import ClsKeys as K
 from .utils import InnerConfig
 from ....utils import logging
 from . import transforms as T
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class ClsPredictor(BasePredictor):
     """ Clssification Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def load_other_src(self):
         """ load the inner config file """
@@ -77,7 +77,7 @@ class ClsPredictor(BasePredictor):
         """ get postprocess transforms """
         post_transforms = self.other_src.post_transforms
         post_transforms.extend([
-            T.PrintResult(), T.SaveClsResults(self.output_dir,
+            T.PrintResult(), T.SaveClsResults(self.output,
                                               self.other_src.labels)
         ])
         return post_transforms

+ 1 - 1
paddlex/modules/image_classification/predictor/transforms.py

@@ -166,7 +166,7 @@ class SaveClsResults(BaseTransform):
         pred = data[K.CLS_PRED]
         index = pred.argsort(axis=0)[-1].astype("int32")
         score = pred[index].item()
-        label = self.class_id_map[int(index)]
+        label = self.class_id_map[int(index)] if self.class_id_map else ""
         label_str = f"{label} {score:.2f}"
         file_name = os.path.basename(ori_path)
         save_path = os.path.join(self.save_dir, file_name)
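
The fallback above keeps result saving working when no class-id map is available (see the `label_list` handling in `utils.py` below): the drawn text degrades to the bare score instead of raising a `KeyError`. A self-contained toy check of that logic:

```python
# Toy reproduction of the fallback; format_label is illustrative, not PaddleX code.
import numpy as np

def format_label(pred, class_id_map):
    index = pred.argsort(axis=0)[-1].astype("int32")
    score = pred[index].item()
    label = class_id_map[int(index)] if class_id_map else ""
    return f"{label} {score:.2f}"

print(format_label(np.array([0.1, 0.7, 0.2]), {}))            # -> " 0.70"
print(format_label(np.array([0.1, 0.7, 0.2]), {1: "daisy"}))  # -> "daisy 0.70"
```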

+ 5 - 2
paddlex/modules/image_classification/predictor/utils.py

@@ -73,13 +73,16 @@ class InnerConfig(object):
     @property
     def post_transforms(self):
         """ read postprocess transforms from config file """
+        IGNORE_OPS = ['main_indicator', 'SavePreLabel']
         tfs_cfg = self.inner_cfg['PostProcess']
         tfs = []
         for tf_key in tfs_cfg:
             if tf_key == 'Topk':
                 tf = T.Topk(
                     topk=tfs_cfg['Topk']['topk'],
-                    class_ids=tfs_cfg['Topk']['label_list'])
+                    class_ids=tfs_cfg['Topk'].get('label_list', None))
+            elif tf_key in IGNORE_OPS:
+                continue
             else:
                 raise RuntimeError(f"Unsupported type: {tf_key}")
             tfs.append(tf)
@@ -88,4 +91,4 @@ class InnerConfig(object):
     @property
     def labels(self):
         """ the labels in inner config """
-        return self.inner_cfg["PostProcess"]["Topk"]["label_list"]
+        return self.inner_cfg['PostProcess']['Topk'].get('label_list', None)
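
The `utils.py` change makes the classification postprocess config parsing tolerant: entries such as `main_indicator` and `SavePreLabel` are skipped, and `label_list` becomes optional. A small stand-alone sketch of that tolerant-parsing pattern (the returned tuples are placeholders for the real transform objects):

```python
# Stand-alone sketch of tolerant PostProcess parsing; tuples stand in for transforms.
IGNORE_OPS = ['main_indicator', 'SavePreLabel']

def build_post_transforms(post_cfg: dict):
    tfs = []
    for key, cfg in post_cfg.items():
        if key in IGNORE_OPS:      # silently skip entries the predictor does not need
            continue
        if key == 'Topk':
            # label_list is optional now; downstream code falls back to "" (see transforms.py above)
            tfs.append(('Topk', cfg['topk'], cfg.get('label_list', None)))
        else:
            raise RuntimeError(f"Unsupported type: {key}")
    return tfs

print(build_post_transforms({'Topk': {'topk': 5}, 'main_indicator': 'Topk'}))
# -> [('Topk', 5, None)]
```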

+ 2 - 2
paddlex/modules/image_classification/trainer.py

@@ -19,13 +19,13 @@ import paddle
 from pathlib import Path
 
 from ..base import BaseTrainer, BaseTrainDeamon
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 from ...utils.config import AttrDict
 
 
 class ClsTrainer(BaseTrainer):
     """ Image Classification Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def dump_label_dict(self, src_label_dict_path: str):
         """dump label dict config

+ 4 - 4
paddlex/modules/instance_segmentation/dataset_checker/__init__.py

@@ -18,13 +18,13 @@ import os
 from .dataset_src import check, convert, split_dataset, deep_analyse
 from ...base import BaseDatasetChecker
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class COCOInstSegDatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Instance Segmentation Model
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
 
     def convert_dataset(self, src_dataset_dir: str) -> str:
@@ -62,7 +62,7 @@ class COCOInstSegDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: dataset summary.
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
@@ -73,7 +73,7 @@ class COCOInstSegDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: the deep analysis results.
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
     def get_show_type(self) -> str:
         """get the show type of dataset

+ 2 - 2
paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py

@@ -27,7 +27,7 @@ from pycocotools.coco import COCO
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
-def deep_analyse(dataset_dir, output_dir):
+def deep_analyse(dataset_dir, output):
     """class analysis for dataset"""
     tags = ['train', 'val']
     all_instances = 0
@@ -76,6 +76,6 @@ def deep_analyse(dataset_dir, output_dir):
     ax.set_ylabel('Counts')
     plt.legend()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 3
paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py

@@ -27,7 +27,7 @@ from .....utils.errors import DatasetFileNotFoundError
 from .....utils.logging import info
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     info(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
@@ -58,7 +58,7 @@ def check(dataset_dir, output_dir, sample_num=10):
             coco = COCO(file_list)
             num_class = len(coco.getCatIds())
 
-            vis_save_dir = osp.join(output_dir, 'demo_img')
+            vis_save_dir = osp.join(output, 'demo_img')
 
             image_info = jsondata['images']
             for i in range(sample_num):
@@ -75,7 +75,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 vis_im.save(vis_path)
                 sample_path = osp.join('check_dataset',
-                                       os.path.relpath(vis_path, output_dir))
+                                       os.path.relpath(vis_path, output))
                 sample_paths[tag].append(sample_path)
 
     attrs = {}

+ 2 - 2
paddlex/modules/instance_segmentation/evaluator.py

@@ -14,12 +14,12 @@
 
 
 from ..object_detection import DetEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class InstanceSegEvaluator(DetEvaluator):
     """ Instance Segmentation Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def update_config(self):
         """update evalution config

+ 1 - 1
paddlex/modules/instance_segmentation/support_models.py → paddlex/modules/instance_segmentation/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'Mask-RT-DETR-H',
     'Mask-RT-DETR-L',
 ]

+ 2 - 2
paddlex/modules/instance_segmentation/predictor/predictor.py

@@ -16,12 +16,12 @@
 import numpy as np
 from ...object_detection import DetPredictor
 from .keys import InstanceSegKeys as K
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class InstanceSegPredictor(DetPredictor):
     """ Instance Seg Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def _run(self, batch_input):
         """ run """

+ 2 - 2
paddlex/modules/instance_segmentation/trainer.py

@@ -14,12 +14,12 @@
 
 
 from ..object_detection import DetTrainer
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class InstanceSegTrainer(DetTrainer):
     """ Instance Segmentation Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def _update_dataset(self):
         """update dataset settings

+ 4 - 4
paddlex/modules/object_detection/dataset_checker/__init__.py

@@ -25,13 +25,13 @@ from pycocotools.coco import COCO
 from ...base import BaseDatasetChecker
 from .dataset_src import check, convert, split_dataset, deep_analyse
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class COCODatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Object Detection Model
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
 
     def get_dataset_root(self, dataset_dir: str) -> str:
@@ -83,7 +83,7 @@ class COCODatasetChecker(BaseDatasetChecker):
         Returns:
             dict: dataset summary.
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
@@ -94,7 +94,7 @@ class COCODatasetChecker(BaseDatasetChecker):
         Returns:
             dict: the deep analysis results.
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
     def get_show_type(self) -> str:
         """get the show type of dataset

+ 2 - 2
paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py

@@ -29,7 +29,7 @@ from pycocotools.coco import COCO
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
-def deep_analyse(dataset_dir, output_dir):
+def deep_analyse(dataset_dir, output):
     """class analysis for dataset"""
     tags = ['train', 'val']
     all_instances = 0
@@ -78,6 +78,6 @@ def deep_analyse(dataset_dir, output_dir):
     ax.set_ylabel('Counts')
     plt.legend()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 3
paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py

@@ -25,7 +25,7 @@ from .....utils.errors import DatasetFileNotFoundError
 from .utils.visualizer import draw_bbox
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
@@ -53,7 +53,7 @@ def check(dataset_dir, output_dir, sample_num=10):
             coco = COCO(file_list)
             num_class = len(coco.getCatIds())
 
-            vis_save_dir = osp.join(output_dir, 'demo_img')
+            vis_save_dir = osp.join(output, 'demo_img')
 
             image_info = jsondata['images']
             sample_cnts[tag] = len(image_info)
@@ -71,7 +71,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 vis_im.save(vis_path)
                 sample_path = osp.join('check_dataset',
-                                       os.path.relpath(vis_path, output_dir))
+                                       os.path.relpath(vis_path, output))
                 sample_paths[tag].append(sample_path)
 
     attrs = {}

+ 4 - 4
paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py

@@ -245,7 +245,7 @@ def convert_voc_dataset(root_dir, anno_map):
             annotation_paths=ann_paths,
             label_indexer=label_indexer,
             img_indexer=img_indexer,
-            output_dir=annotations_dir,
+            output=annotations_dir,
             output_file=dst_anno)
 
 
@@ -354,7 +354,7 @@ def voc_get_coco_annotation(obj, label_indexer):
 
 
 def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
-                         output_dir, output_file):
+                         output, output_file):
     """
     Convert VOC format data to COCO format.
     
@@ -362,7 +362,7 @@ def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
         annotation_paths (list): A list of paths to the XML files.
         label_indexer: indexer to get category id by label name.
         img_indexer: indexer to get image id by filename.
-        output_dir (str): The directory to save output JSON file.
+        output (str): The directory to save output JSON file.
         output_file (str): Output JSON file name.
     
     Returns:
@@ -425,6 +425,6 @@ def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
             bnd_id = bnd_id + 1
 
     output_json_dict['categories'] = label_indexer.get_list(key_name="name")
-    output_file = os.path.join(output_dir, output_file)
+    output_file = os.path.join(output, output_file)
     write_json_file(output_json_dict, output_file)
     info(f"The converted annotations has been save to {output_file}.")

+ 2 - 2
paddlex/modules/object_detection/evaluator.py

@@ -14,12 +14,12 @@
 
 
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class DetEvaluator(BaseEvaluator):
     """ Object Detection Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def update_config(self):
         """update evalution config

+ 1 - 1
paddlex/modules/object_detection/support_models.py → paddlex/modules/object_detection/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'PicoDet-L',
     'PicoDet-S',
     'PP-YOLOE_plus-L',

+ 3 - 3
paddlex/modules/object_detection/predictor/predictor.py

@@ -24,12 +24,12 @@ from ...base.predictor.transforms import image_common
 from . import transforms as T
 from .keys import DetKeys as K
 from .utils import InnerConfig
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class DetPredictor(BasePredictor):
     """ Detection Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def load_other_src(self):
         """ load the inner config file """
@@ -91,6 +91,6 @@ class DetPredictor(BasePredictor):
         """ get postprocess transforms """
         return [
             T.SaveDetResults(
-                save_dir=self.output_dir, labels=self.other_src.labels),
+                save_dir=self.output, labels=self.other_src.labels),
             T.PrintResult()
         ]

+ 2 - 2
paddlex/modules/object_detection/trainer.py

@@ -19,12 +19,12 @@ import paddle
 from ..base import BaseTrainer, BaseTrainDeamon
 from ...utils.config import AttrDict
 from ...utils import logging
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class DetTrainer(BaseTrainer):
     """ Object Detection Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def build_deamon(self, config: AttrDict) -> "DetTrainDeamon":
         """build deamon thread for saving training outputs timely

+ 4 - 4
paddlex/modules/semantic_segmentation/dataset_checker/__init__.py

@@ -19,12 +19,12 @@ import os.path as osp
 from ...base import BaseDatasetChecker
 from .dataset_src import check_dataset, convert_dataset, split_dataset, anaylse_dataset
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class SegDatasetChecker(BaseDatasetChecker):
     """ Dataset Checker for Semantic Segmentation Model """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
 
     def convert_dataset(self, src_dataset_dir: str) -> str:
@@ -62,7 +62,7 @@ class SegDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: dataset summary.
         """
-        return check_dataset(dataset_dir, self.output_dir, sample_num)
+        return check_dataset(dataset_dir, self.output, sample_num)
 
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
@@ -73,7 +73,7 @@ class SegDatasetChecker(BaseDatasetChecker):
         Returns:
             dict: the deep analysis results.
         """
-        return anaylse_dataset(dataset_dir, self.output_dir)
+        return anaylse_dataset(dataset_dir, self.output)
 
     def get_show_type(self) -> str:
         """get the show type of dataset

+ 2 - 2
paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py

@@ -25,7 +25,7 @@ from .....utils.file_interface import custom_open
 from .....utils.logging import info
 
 
-def anaylse_dataset(dataset_dir, output_dir):
+def anaylse_dataset(dataset_dir, output):
     """class analysis for dataset"""
 
     split_tags = ["train", "val"]
@@ -71,6 +71,6 @@ def anaylse_dataset(dataset_dir, output_dir):
     ax.set_ylabel('Sample Counts')
     plt.legend()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 4
paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py

@@ -26,12 +26,12 @@ from .....utils.file_interface import custom_open
 from .....utils.logging import info
 
 
-def check_dataset(dataset_dir, output_dir, sample_num=10):
+def check_dataset(dataset_dir, output, sample_num=10):
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
         raise DatasetFileNotFoundError(file_path=dataset_dir)
-    vis_save_dir = osp.join(output_dir, 'demo_img')
+    vis_save_dir = osp.join(output, 'demo_img')
     if not osp.exists(vis_save_dir):
         os.makedirs(vis_save_dir)
     split_tags = ["train", "val"]
@@ -67,8 +67,7 @@ def check_dataset(dataset_dir, output_dir, sample_num=10):
                                              osp.basename(img_file))
                     vis_img.save(vis_save_path)
                     vis_save_path = osp.join(
-                        'check_dataset',
-                        os.path.relpath(vis_save_path, output_dir))
+                        'check_dataset', os.path.relpath(vis_save_path, output))
                     if f"{tag}_sample_paths" not in attrs:
                         attrs[f"{tag}_sample_paths"] = [vis_save_path]
                     else:

+ 2 - 2
paddlex/modules/semantic_segmentation/evaluator.py

@@ -15,12 +15,12 @@
 
 from pathlib import Path
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class SegEvaluator(BaseEvaluator):
     """ Semantic Segmentation Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def update_config(self):
         """update evalution config

+ 1 - 1
paddlex/modules/semantic_segmentation/support_models.py → paddlex/modules/semantic_segmentation/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'Deeplabv3_Plus-R101',
     'Deeplabv3_Plus-R50',
     'Deeplabv3-R101',

+ 5 - 6
paddlex/modules/semantic_segmentation/predictor/predictor.py

@@ -23,24 +23,24 @@ from ...base import BasePredictor
 from .keys import SegKeys as K
 from . import transforms as T
 from .utils import InnerConfig
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 class SegPredictor(BasePredictor):
     """ SegPredictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def __init__(self,
                  model_dir,
                  kernel_option,
-                 output_dir,
+                 output,
                  pre_transforms=None,
                  post_transforms=None,
                  has_prob_map=False):
         super().__init__(
             model_dir=model_dir,
             kernel_option=kernel_option,
-            output_dir=output_dir,
+            output=output,
             pre_transforms=pre_transforms,
             post_transforms=post_transforms)
         self.has_prob_map = has_prob_map
@@ -104,6 +104,5 @@ class SegPredictor(BasePredictor):
     def _get_post_transforms_from_config(self):
         """ _get_post_transforms_from_config """
         return [
-            T.GeneratePCMap(), T.SaveSegResults(self.output_dir),
-            T.PrintResult()
+            T.GeneratePCMap(), T.SaveSegResults(self.output), T.PrintResult()
         ]
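
Note: a minimal instantiation sketch reflecting the renamed constructor keyword shown above; only the keyword names are taken from the hunk, while the `model_dir` and `output` values are placeholders and `kernel_option` is assumed to be built elsewhere:

    # hypothetical usage; argument values are assumptions for illustration only
    predictor = SegPredictor(
        model_dir="output/best_model/inference",  # placeholder path
        kernel_option=kernel_option,              # assumed, constructed elsewhere
        output="output/predict")                  # was `output_dir` before this change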

+ 2 - 2
paddlex/modules/semantic_segmentation/trainer.py

@@ -20,12 +20,12 @@ import paddle
 
 from ..base import BaseTrainer, BaseTrainDeamon
 from ...utils.config import AttrDict
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 class SegTrainer(BaseTrainer):
     """ Semantic Segmentation Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
     def build_deamon(self, config: AttrDict) -> "SegTrainDeamon":
         """build deamon thread for saving training outputs timely

Some files were not shown because too many files changed in this diff