Răsfoiți Sursa

Merge pull request #1767 from cuicheng01/develop

polish docs and fix bugs
cuicheng01 1 an în urmă
părinte
comite
0f6f80734c
100 a modificat fișierele cu 185 adăugiri și 196 ștergeri
  1. 9 7
      README.md
  2. 1 3
      docs/tutorials/INSTALL.md
  3. 7 7
      docs/tutorials/inference/pipeline_inference_tools.md
  4. 1 1
      paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml
  5. 1 1
      paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml
  6. 1 1
      paddlex/configs/image_classification/ConvNeXt_tiny.yaml
  7. 1 1
      paddlex/configs/image_classification/MobileNetV2_x0_25.yaml
  8. 1 1
      paddlex/configs/image_classification/MobileNetV2_x0_5.yaml
  9. 1 1
      paddlex/configs/image_classification/MobileNetV2_x1_0.yaml
  10. 1 1
      paddlex/configs/image_classification/MobileNetV2_x1_5.yaml
  11. 1 1
      paddlex/configs/image_classification/MobileNetV2_x2_0.yaml
  12. 1 1
      paddlex/configs/image_classification/MobileNetV3_large_x0_35.yaml
  13. 1 1
      paddlex/configs/image_classification/MobileNetV3_large_x0_5.yaml
  14. 1 1
      paddlex/configs/image_classification/MobileNetV3_large_x0_75.yaml
  15. 1 1
      paddlex/configs/image_classification/MobileNetV3_large_x1_0.yaml
  16. 1 1
      paddlex/configs/image_classification/MobileNetV3_large_x1_25.yaml
  17. 1 1
      paddlex/configs/image_classification/MobileNetV3_small_x0_35.yaml
  18. 1 1
      paddlex/configs/image_classification/MobileNetV3_small_x0_5.yaml
  19. 1 1
      paddlex/configs/image_classification/MobileNetV3_small_x0_75.yaml
  20. 1 1
      paddlex/configs/image_classification/MobileNetV3_small_x1_0.yaml
  21. 1 1
      paddlex/configs/image_classification/MobileNetV3_small_x1_25.yaml
  22. 1 1
      paddlex/configs/image_classification/PP-HGNetV2-B0.yaml
  23. 1 1
      paddlex/configs/image_classification/PP-HGNetV2-B4.yaml
  24. 1 1
      paddlex/configs/image_classification/PP-HGNetV2-B6.yaml
  25. 1 1
      paddlex/configs/image_classification/PP-HGNet_small.yaml
  26. 1 1
      paddlex/configs/image_classification/PP-LCNet_x0_25.yaml
  27. 1 1
      paddlex/configs/image_classification/PP-LCNet_x0_35.yaml
  28. 1 1
      paddlex/configs/image_classification/PP-LCNet_x0_5.yaml
  29. 1 1
      paddlex/configs/image_classification/PP-LCNet_x0_75.yaml
  30. 1 1
      paddlex/configs/image_classification/PP-LCNet_x1_0.yaml
  31. 1 1
      paddlex/configs/image_classification/PP-LCNet_x1_5.yaml
  32. 1 1
      paddlex/configs/image_classification/PP-LCNet_x2_0.yaml
  33. 1 1
      paddlex/configs/image_classification/PP-LCNet_x2_5.yaml
  34. 1 1
      paddlex/configs/image_classification/ResNet101.yaml
  35. 1 1
      paddlex/configs/image_classification/ResNet152.yaml
  36. 1 1
      paddlex/configs/image_classification/ResNet18.yaml
  37. 1 1
      paddlex/configs/image_classification/ResNet34.yaml
  38. 1 1
      paddlex/configs/image_classification/ResNet50.yaml
  39. 1 1
      paddlex/configs/image_classification/SwinTransformer_base_patch4_window7_224.yaml
  40. 1 1
      paddlex/configs/instance_segmentation/Mask-RT-DETR-H.yaml
  41. 1 1
      paddlex/configs/instance_segmentation/Mask-RT-DETR-L.yaml
  42. 1 1
      paddlex/configs/object_detection/PP-YOLOE_plus-L.yaml
  43. 1 1
      paddlex/configs/object_detection/PP-YOLOE_plus-M.yaml
  44. 1 1
      paddlex/configs/object_detection/PP-YOLOE_plus-S.yaml
  45. 1 1
      paddlex/configs/object_detection/PP-YOLOE_plus-X.yaml
  46. 1 1
      paddlex/configs/object_detection/PicoDet-L.yaml
  47. 1 1
      paddlex/configs/object_detection/PicoDet-S.yaml
  48. 1 1
      paddlex/configs/object_detection/RT-DETR-H.yaml
  49. 1 1
      paddlex/configs/object_detection/RT-DETR-L.yaml
  50. 1 1
      paddlex/configs/object_detection/RT-DETR-R18.yaml
  51. 1 1
      paddlex/configs/object_detection/RT-DETR-R50.yaml
  52. 1 1
      paddlex/configs/object_detection/RT-DETR-X.yaml
  53. 1 1
      paddlex/configs/semantic_segmentation/Deeplabv3-R101.yaml
  54. 1 1
      paddlex/configs/semantic_segmentation/Deeplabv3-R50.yaml
  55. 1 1
      paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R101.yaml
  56. 1 1
      paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R50.yaml
  57. 1 1
      paddlex/configs/semantic_segmentation/OCRNet_HRNet-W48.yaml
  58. 1 1
      paddlex/configs/semantic_segmentation/PP-LiteSeg-T.yaml
  59. 1 1
      paddlex/configs/text_detection/PP-OCRv4_mobile_det.yaml
  60. 1 1
      paddlex/configs/text_detection/PP-OCRv4_server_det.yaml
  61. 1 1
      paddlex/configs/text_recognition/PP-OCRv4_mobile_rec.yaml
  62. 1 1
      paddlex/configs/text_recognition/PP-OCRv4_server_rec.yaml
  63. 2 2
      paddlex/engine.py
  64. 4 5
      paddlex/modules/base/dataset_checker/dataset_checker.py
  65. 7 12
      paddlex/modules/base/evaluator.py
  66. 5 5
      paddlex/modules/base/predictor/predictor.py
  67. 1 1
      paddlex/modules/base/predictor/utils/official_models.py
  68. 4 5
      paddlex/modules/base/trainer/train_deamon.py
  69. 6 10
      paddlex/modules/base/trainer/trainer.py
  70. 4 4
      paddlex/modules/image_classification/dataset_checker/__init__.py
  71. 2 2
      paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py
  72. 4 5
      paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py
  73. 2 2
      paddlex/modules/image_classification/evaluator.py
  74. 1 1
      paddlex/modules/image_classification/model_list.py
  75. 3 3
      paddlex/modules/image_classification/predictor/predictor.py
  76. 1 1
      paddlex/modules/image_classification/predictor/transforms.py
  77. 5 2
      paddlex/modules/image_classification/predictor/utils.py
  78. 2 2
      paddlex/modules/image_classification/trainer.py
  79. 4 4
      paddlex/modules/instance_segmentation/dataset_checker/__init__.py
  80. 2 2
      paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py
  81. 3 3
      paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py
  82. 2 2
      paddlex/modules/instance_segmentation/evaluator.py
  83. 1 1
      paddlex/modules/instance_segmentation/model_list.py
  84. 2 2
      paddlex/modules/instance_segmentation/predictor/predictor.py
  85. 2 2
      paddlex/modules/instance_segmentation/trainer.py
  86. 4 4
      paddlex/modules/object_detection/dataset_checker/__init__.py
  87. 2 2
      paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py
  88. 3 3
      paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py
  89. 4 4
      paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py
  90. 2 2
      paddlex/modules/object_detection/evaluator.py
  91. 1 1
      paddlex/modules/object_detection/model_list.py
  92. 3 3
      paddlex/modules/object_detection/predictor/predictor.py
  93. 2 2
      paddlex/modules/object_detection/trainer.py
  94. 4 4
      paddlex/modules/semantic_segmentation/dataset_checker/__init__.py
  95. 2 2
      paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py
  96. 3 4
      paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py
  97. 2 2
      paddlex/modules/semantic_segmentation/evaluator.py
  98. 1 1
      paddlex/modules/semantic_segmentation/model_list.py
  99. 5 6
      paddlex/modules/semantic_segmentation/predictor/predictor.py
  100. 2 2
      paddlex/modules/semantic_segmentation/trainer.py

+ 9 - 7
README.md

@@ -24,8 +24,8 @@ PaddleX3.0 分为本地端和云端,本地端提供统一任务API接口,支
 ## 安装与快速开始
 ## 安装与快速开始
 - [安装](./docs/tutorials/INSTALL.md)
 - [安装](./docs/tutorials/INSTALL.md)
 - 快速开始
 - 快速开始
-  - [单模型开发工具快速开始](./docs/tutorials/QUCK_STARTED.md)
-  - 模型产线开发工具快速开始(comming soon)
+  - [单模型开发工具](./docs/tutorials/tools/model_tools.md)
+  - [模型产线开发工具](./docs/tutorials/tools/pipelines_tools.md)
 
 
 ## 单模型开发工具
 ## 单模型开发工具
 本节介绍 PaddleX3.0 单模型的全流程开发流程,包括数据准备、模型训练/评估、模型推理的使用方法。PaddleX3.0 支持的模型可以参考 [PaddleX模型库](./docs/tutorials/models/support_model_list.md)。
 本节介绍 PaddleX3.0 单模型的全流程开发流程,包括数据准备、模型训练/评估、模型推理的使用方法。PaddleX3.0 支持的模型可以参考 [PaddleX模型库](./docs/tutorials/models/support_model_list.md)。
@@ -35,17 +35,19 @@ PaddleX3.0 分为本地端和云端,本地端提供统一任务API接口,支
 - [数据标注](./docs/tutorials/data/annotation/README.md)
 - [数据标注](./docs/tutorials/data/annotation/README.md)
 - [数据校验](./docs/tutorials/data/dataset_check.md)
 - [数据校验](./docs/tutorials/data/dataset_check.md)
 ### 2. 模型训练
 ### 2. 模型训练
-- [模型训练/评估](./docs/tutorials/train/README.md)
-- 模型优化(comming soon)
+- [模型训练/评估](./docs/tutorials/base/README.md)
+- [模型优化](./docs/tutorials/base/model_optimize.md)
 
 
 ### 3. 模型推理
 ### 3. 模型推理
-- 模型推理能力即将开源,敬请期待...
+ - [模型推理](docs/tutorials/inference/model_inference_tools.md)
+ - [模型推理 API 介绍](docs/tutorials/inference/model_inference_api.md)
 
 
 ## 模型产线开发工具
 ## 模型产线开发工具
-本节将介绍 PaddleX3.0 模型产线的全流程开发流程,此部分内容即将开源,敬请期待...
+ - [模型产线推理](docs/tutorials/inference/pipeline_inference_tools.md)
+ - [模型产线推理 API 介绍](docs/tutorials/inference/pipeline_inference_api.md)
 
 
 ## 多硬件支持
 ## 多硬件支持
-🔥 本项目支持在多种硬件上进行模型的开发,除了GPU外,当前支持的硬件还有**昆仑芯**、**昇腾芯**、**寒武纪芯**。只需添加一个配置设备的参数,即可在对应硬件上使用上述工具。详情参考[多硬件训练](./docs/tutorials/train/multi_device_train.md)
+🔥 本项目支持在多种硬件上进行模型的开发,除了 GPU 外,当前支持的硬件还有**昆仑芯**、**昇腾芯**、**寒武纪芯**。只需添加一个配置设备的参数,即可在对应硬件上使用上述工具。详情可以参考上述文档
 
 
 - 昇腾芯支持的模型列表请参考 [PaddleX 昇腾芯模型列表](./docs/tutorials/models/support_npu_model_list.md)。
 - 昇腾芯支持的模型列表请参考 [PaddleX 昇腾芯模型列表](./docs/tutorials/models/support_npu_model_list.md)。
 - 昆仑芯支持的模型列表请参考 [PaddleX 昆仑芯模型列表](./docs/tutorials/models/support_xpu_model_list.md)。
 - 昆仑芯支持的模型列表请参考 [PaddleX 昆仑芯模型列表](./docs/tutorials/models/support_xpu_model_list.md)。

+ 1 - 3
docs/tutorials/INSTALL.md

@@ -88,11 +88,9 @@ git clone https://gitee.com/paddlepaddle/PaddleX.git
 <!-- 这里需要指明安装成功的状态, 廷权 -->
 <!-- 这里需要指明安装成功的状态, 廷权 -->
 ```bash
 ```bash
 cd PaddleX
 cd PaddleX
-# 安装第三方依赖
-pip install -r requirements.txt
 
 
 # 安装 PaddleX whl
 # 安装 PaddleX whl
-# -e:以可编辑模式安装,当前项目的代码更改,都会作用到 PaddleX Wheel
+# -e:以可编辑模式安装,当前项目的代码更改,都会直接作用到已经安装的 PaddleX Wheel
 pip install -e .
 pip install -e .
 
 
 # 安装 PaddleX 相关依赖
 # 安装 PaddleX 相关依赖

+ 7 - 7
docs/tutorials/inference/pipeline_inference_tools.md

@@ -38,7 +38,7 @@ result = pipeline.predict(
         {'input_path': "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"}
         {'input_path': "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"}
     )
     )
 print(result["cls_result"])
 print(result["cls_result"])
-```    
+```  
 </details>
 </details>
 
 
 ### 2.2 目标检测产线
 ### 2.2 目标检测产线
@@ -71,8 +71,8 @@ from paddlex import PaddleInferenceOption
 model_name =  "RT-DETR-L"
 model_name =  "RT-DETR-L"
 output_base = Path("output")
 output_base = Path("output")
 
 
-output_dir = output_base / model_name
-pipeline = DetPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = DetPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
 result = pipeline.predict(
         {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"})
         {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"})
 print(result["boxes"])
 print(result["boxes"])
@@ -111,8 +111,8 @@ from paddlex import PaddleInferenceOption
 
 
 model_name = "PP-LiteSeg-T",
 model_name = "PP-LiteSeg-T",
 output_base = Path("output")
 output_base = Path("output")
-output_dir = output_base / model_name
-pipeline = SegPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = SegPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
 result = pipeline.predict(
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"}
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"}
 )
 )
@@ -152,8 +152,8 @@ from paddlex import PaddleInferenceOption
 model_name =  "Mask-RT-DETR-L"
 model_name =  "Mask-RT-DETR-L"
 output_base = Path("output")
 output_base = Path("output")
 
 
-output_dir = output_base / model_name
-pipeline = DetPipeline(model_name, output_dir=output_dir, kernel_option=PaddleInferenceOption())
+output = output_base / model_name
+pipeline = DetPipeline(model_name, output=output, kernel_option=PaddleInferenceOption())
 result = pipeline.predict(
 result = pipeline.predict(
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"})
     {"input_path": "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"})
 print(result["boxes"])
 print(result["boxes"])

+ 1 - 1
paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ConvNeXt_tiny.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x0_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x1_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV2_x2_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_large_x1_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/MobileNetV3_small_x1_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B4.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNetV2-B6.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-HGNet_small.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_25.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_35.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x0_75.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x1_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x1_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x2_0.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/PP-LCNet_x2_5.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet101.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet152.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet18.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet34.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/ResNet50.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/image_classification/SwinTransformer_base_patch4_window7_224.yaml

@@ -32,7 +32,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/cls/cls_flowers_examples/images/image_00002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/instance_segmentation/Mask-RT-DETR-H.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/instance_seg/instance_seg_coco_examples/images/aircraft-women-fashion-pilot-48797.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/instance_segmentation/Mask-RT-DETR-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/instance_seg/instance_seg_coco_examples/images/aircraft-women-fashion-pilot-48797.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_instance_segmentation_004.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-M.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-S.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PP-YOLOE_plus-X.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PicoDet-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/PicoDet-S.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-H.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-L.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-R18.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/object_detection/RT-DETR-X.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model"
   model_dir: "output/best_model"
-  input_path: "/paddle/dataset/paddlex/det/det_coco_examples/images/road0.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_object_detection_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3-R101.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R101.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R50.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/OCRNet_HRNet-W48.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/semantic_segmentation/PP-LiteSeg-T.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_model/model"
   model_dir: "output/best_model/model"
-  input_path: "/paddle/dataset/paddlex/seg/seg_optic_examples/images/H0002.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_semantic_segmentation_002.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/text_detection/PP-OCRv4_mobile_det.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_accuracy"
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_det/ocr_det_dataset_examples/images/train_img_100.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_001.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/text_detection/PP-OCRv4_server_det.yaml

@@ -31,7 +31,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_accuracy"
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_det/ocr_det_dataset_examples/images/train_img_100.jpg"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_001.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/text_recognition/PP-OCRv4_mobile_rec.yaml

@@ -30,7 +30,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_accuracy"
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_rec/ocr_rec_dataset_examples/images/train_word_1003.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_rec_001.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 1 - 1
paddlex/configs/text_recognition/PP-OCRv4_server_rec.yaml

@@ -30,7 +30,7 @@ Evaluate:
 
 
 Predict:
 Predict:
   model_dir: "output/best_accuracy"
   model_dir: "output/best_accuracy"
-  input_path: "/paddle/dataset/paddlex/ocr_rec/ocr_rec_dataset_examples/images/train_word_1003.png"
+  input_path: "https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_rec_001.png"
   kernel_option:
   kernel_option:
     run_mode: paddle
     run_mode: paddle
     batch_size: 1
     batch_size: 1

+ 2 - 2
paddlex/engine.py

@@ -29,14 +29,14 @@ class Engine(object):
         self.config = config.get_config(
         self.config = config.get_config(
             args.config, overrides=args.override, show=False)
             args.config, overrides=args.override, show=False)
         self.mode = self.config.Global.mode
         self.mode = self.config.Global.mode
-        self.output_dir = self.config.Global.output
+        self.output = self.config.Global.output
 
 
     @try_except_decorator
     @try_except_decorator
     def run(self):
     def run(self):
         """ the main function """
         """ the main function """
         if self.config.Global.mode == "check_dataset":
         if self.config.Global.mode == "check_dataset":
             dataset_checker = build_dataset_checker(self.config)
             dataset_checker = build_dataset_checker(self.config)
-            return dataset_checker.check_dataset()
+            return dataset_checker.check()
         elif self.config.Global.mode == "train":
         elif self.config.Global.mode == "train":
             trainer = build_trainer(self.config)
             trainer = build_trainer(self.config)
             trainer.train()
             trainer.train()

+ 4 - 5
paddlex/modules/base/dataset_checker/dataset_checker.py

@@ -49,10 +49,9 @@ class BaseDatasetChecker(ABC, metaclass=AutoRegisterABCMetaClass):
         super().__init__()
         super().__init__()
         self.global_config = config.Global
         self.global_config = config.Global
         self.check_dataset_config = config.CheckDataset
         self.check_dataset_config = config.CheckDataset
-        self.output_dir = os.path.join(self.global_config.output,
-                                       "check_dataset")
+        self.output = os.path.join(self.global_config.output, "check_dataset")
 
 
-    def check_dataset(self) -> dict:
+    def check(self) -> dict:
         """execute dataset checking
         """execute dataset checking
 
 
         Returns:
         Returns:
@@ -60,8 +59,8 @@ class BaseDatasetChecker(ABC, metaclass=AutoRegisterABCMetaClass):
         """
         """
         dataset_dir = self.get_dataset_root(self.global_config.dataset_dir)
         dataset_dir = self.get_dataset_root(self.global_config.dataset_dir)
 
 
-        if not os.path.exists(self.output_dir):
-            os.makedirs(self.output_dir)
+        if not os.path.exists(self.output):
+            os.makedirs(self.output)
 
 
         if self.check_dataset_config.get("convert", None):
         if self.check_dataset_config.get("convert", None):
             if self.check_dataset_config.convert.get("enable", False):
             if self.check_dataset_config.convert.get("enable", False):

+ 7 - 12
paddlex/modules/base/evaluator.py

@@ -100,7 +100,13 @@ class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
         Returns:
         Returns:
             dict: the evaluation metrics
             dict: the evaluation metrics
         """
         """
-        metrics = self.eval()
+        self.update_config()
+        # self.dump_config()
+        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
+        assert evaluate_result.returncode == 0, f"Encountered an unexpected error({evaluate_result.returncode}) in \
+evaling!"
+
+        metrics = evaluate_result.metrics
         assert self.check_return(
         assert self.check_return(
             metrics
             metrics
         ), f"The return value({metrics}) of Evaluator.eval() is illegal!"
         ), f"The return value({metrics}) of Evaluator.eval() is illegal!"
@@ -118,17 +124,6 @@ class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
                                             "config.yaml")
                                             "config.yaml")
         self.pdx_config.dump(config_file_path)
         self.pdx_config.dump(config_file_path)
 
 
-    def eval(self):
-        """firstly, update evaluation config, then evaluate model, finally return the evaluation result
-        """
-        self.update_config()
-        # self.dump_config()
-        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
-        assert evaluate_result.returncode == 0, f"Encountered an unexpected error({evaluate_result.returncode}) in \
-evaling!"
-
-        return evaluate_result.metrics
-
     def get_device(self, using_device_number: int=None) -> str:
     def get_device(self, using_device_number: int=None) -> str:
         """get device setting from config
         """get device setting from config
 
 

+ 5 - 5
paddlex/modules/base/predictor/predictor.py

@@ -38,13 +38,13 @@ class BasePredictor(ABC, FromDictMixin, Node):
     def __init__(self,
     def __init__(self,
                  model_dir,
                  model_dir,
                  kernel_option,
                  kernel_option,
-                 output_dir,
+                 output,
                  pre_transforms=None,
                  pre_transforms=None,
                  post_transforms=None):
                  post_transforms=None):
         super().__init__()
         super().__init__()
         self.model_dir = model_dir
         self.model_dir = model_dir
         self.kernel_option = kernel_option
         self.kernel_option = kernel_option
-        self.output_dir = output_dir
+        self.output = output
         self.other_src = self.load_other_src()
         self.other_src = self.load_other_src()
 
 
         logging.debug(
         logging.debug(
@@ -171,7 +171,7 @@ class PredictorBuilderByConfig(object):
         self.predictor = BasePredictor.get(model_name)(
         self.predictor = BasePredictor.get(model_name)(
             model_dir=model_dir,
             model_dir=model_dir,
             kernel_option=kernel_option,
             kernel_option=kernel_option,
-            output_dir=config.Global.output_dir,
+            output=config.Global.output,
             **predict_config)
             **predict_config)
 
 
     def predict(self):
     def predict(self):
@@ -189,7 +189,7 @@ def build_predictor(*args, **kwargs):
 def create_model(model_name,
 def create_model(model_name,
                  model_dir=None,
                  model_dir=None,
                  kernel_option=None,
                  kernel_option=None,
-                 output_dir=None,
+                 output="./",
                  pre_transforms=None,
                  pre_transforms=None,
                  post_transforms=None,
                  post_transforms=None,
                  *args,
                  *args,
@@ -206,7 +206,7 @@ def create_model(model_name,
             BasePredictor.get(model_name)
             BasePredictor.get(model_name)
     return BasePredictor.get(model_name)(model_dir=model_dir,
     return BasePredictor.get(model_name)(model_dir=model_dir,
                                          kernel_option=kernel_option,
                                          kernel_option=kernel_option,
-                                         output_dir=output_dir,
+                                         output=output,
                                          pre_transforms=pre_transforms,
                                          pre_transforms=pre_transforms,
                                          post_transforms=post_transforms,
                                          post_transforms=post_transforms,
                                          *args,
                                          *args,

+ 1 - 1
paddlex/modules/base/predictor/utils/official_models.py

@@ -171,7 +171,7 @@ class OfficialModelsDict(dict):
     def __getitem__(self, key):
     def __getitem__(self, key):
         url = super().__getitem__(key)
         url = super().__getitem__(key)
         save_dir = Path(CACHE_DIR) / "official_models"
         save_dir = Path(CACHE_DIR) / "official_models"
-        download_and_extract(url, save_dir, f"{key}", overwrite=True)
+        download_and_extract(url, save_dir, f"{key}", overwrite=False)
         return save_dir / f"{key}"
         return save_dir / f"{key}"
 
 
 
 

+ 4 - 5
paddlex/modules/base/trainer/train_deamon.py

@@ -51,7 +51,7 @@ class BaseTrainDeamon(ABC):
         """ init """
         """ init """
         self.global_config = global_config
         self.global_config = global_config
         self.init_pre_hook()
         self.init_pre_hook()
-        self.output_dir = global_config.output
+        self.output = global_config.output
         self.train_outputs = self.get_train_outputs()
         self.train_outputs = self.get_train_outputs()
         self.save_paths = self.get_save_paths()
         self.save_paths = self.get_save_paths()
         self.results = self.init_train_result()
         self.results = self.init_train_result()
@@ -90,7 +90,7 @@ class BaseTrainDeamon(ABC):
 
 
     def get_train_outputs(self):
     def get_train_outputs(self):
         """ get training outputs dir """
         """ get training outputs dir """
-        return [Path(self.output_dir)]
+        return [Path(self.output)]
 
 
     def init_model_names(self):
     def init_model_names(self):
         """ get models name """
         """ get models name """
@@ -99,8 +99,7 @@ class BaseTrainDeamon(ABC):
     def get_save_paths(self):
     def get_save_paths(self):
         """ get the path to save train_result.json """
         """ get the path to save train_result.json """
         return [
         return [
-            Path(self.output_dir, save_name)
-            for save_name in self.get_save_names()
+            Path(self.output, save_name) for save_name in self.get_save_names()
         ]
         ]
 
 
     def init_configs(self):
     def init_configs(self):
@@ -125,7 +124,7 @@ class BaseTrainDeamon(ABC):
         return model_pkg
         return model_pkg
 
 
     def normlize_path(self, dict_obj, relative_to):
     def normlize_path(self, dict_obj, relative_to):
-        """ normlize path to string type path relative to the output_dir """
+        """ normlize path to string type path relative to the output """
         for key in dict_obj:
         for key in dict_obj:
             if isinstance(dict_obj[key], dict):
             if isinstance(dict_obj[key], dict):
                 self.normlize_path(dict_obj[key], relative_to)
                 self.normlize_path(dict_obj[key], relative_to)

+ 6 - 10
paddlex/modules/base/trainer/trainer.py

@@ -56,7 +56,12 @@ class BaseTrainer(ABC, metaclass=AutoRegisterABCMetaClass):
         """execute model training
         """execute model training
         """
         """
         os.makedirs(self.global_config.output, exist_ok=True)
         os.makedirs(self.global_config.output, exist_ok=True)
-        self.train(*args, **kwargs)
+        self.update_config()
+        self.dump_config()
+        train_result = self.pdx_model.train(**self.get_train_kwargs())
+        assert train_result.returncode == 0, f"Encountered an unexpected error({train_result.returncode}) in \
+training!"
+
         self.deamon.stop()
         self.deamon.stop()
 
 
     def dump_config(self, config_file_path: str=None):
     def dump_config(self, config_file_path: str=None):
@@ -71,15 +76,6 @@ class BaseTrainer(ABC, metaclass=AutoRegisterABCMetaClass):
                                             "config.yaml")
                                             "config.yaml")
         self.pdx_config.dump(config_file_path)
         self.pdx_config.dump(config_file_path)
 
 
-    def train(self):
-        """firstly, update and dump train config, then train model
-        """
-        self.update_config()
-        self.dump_config()
-        train_result = self.pdx_model.train(**self.get_train_kwargs())
-        assert train_result.returncode == 0, f"Encountered an unexpected error({train_result.returncode}) in \
-training!"
-
     def get_device(self, using_device_number: int=None) -> str:
     def get_device(self, using_device_number: int=None) -> str:
         """get device setting from config
         """get device setting from config
 
 

+ 4 - 4
paddlex/modules/image_classification/dataset_checker/__init__.py

@@ -17,13 +17,13 @@ from pathlib import Path
 
 
 from ...base import BaseDatasetChecker
 from ...base import BaseDatasetChecker
 from .dataset_src import check, split_dataset, deep_analyse
 from .dataset_src import check, split_dataset, deep_analyse
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class ClsDatasetChecker(BaseDatasetChecker):
 class ClsDatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Image Classification Model
     """Dataset Checker for Image Classification Model
     """
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
     sample_num = 10
 
 
     def get_dataset_root(self, dataset_dir: str) -> str:
     def get_dataset_root(self, dataset_dir: str) -> str:
@@ -74,7 +74,7 @@ class ClsDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: dataset summary.
             dict: dataset summary.
         """
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
 
     def analyse(self, dataset_dir: str) -> dict:
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
         """deep analyse dataset
@@ -85,7 +85,7 @@ class ClsDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: the deep analysis results.
             dict: the deep analysis results.
         """
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
 
     def get_show_type(self) -> str:
     def get_show_type(self) -> str:
         """get the show type of dataset
         """get the show type of dataset

+ 2 - 2
paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py

@@ -31,7 +31,7 @@ from .....utils.file_interface import custom_open
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
 
 
-def deep_analyse(dataset_path, output_dir):
+def deep_analyse(dataset_path, output):
     """class analysis for dataset"""
     """class analysis for dataset"""
     tags = ['train', 'val']
     tags = ['train', 'val']
     labels_cnt = defaultdict(str)
     labels_cnt = defaultdict(str)
@@ -90,7 +90,7 @@ def deep_analyse(dataset_path, output_dir):
         fontsize=12)
         fontsize=12)
     plt.legend(loc=1)
     plt.legend(loc=1)
     fig.tight_layout()
     fig.tight_layout()
-    file_path = os.path.join(output_dir, "histogram.png")
+    file_path = os.path.join(output, "histogram.png")
     fig.savefig(file_path, dpi=300)
     fig.savefig(file_path, dpi=300)
 
 
     return {"histogram": os.path.join("check_dataset", "histogram.png")}
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 4 - 5
paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py

@@ -24,7 +24,7 @@ from .....utils.errors import DatasetFileNotFoundError, CheckFailedError
 from .utils.visualizer import draw_label
 from .utils.visualizer import draw_label
 
 
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
     # Custom dataset
     # Custom dataset
@@ -95,7 +95,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                     if not osp.exists(img_path):
                     if not osp.exists(img_path):
                         raise DatasetFileNotFoundError(file_path=img_path)
                         raise DatasetFileNotFoundError(file_path=img_path)
 
 
-                    vis_save_dir = osp.join(output_dir, 'demo_img')
+                    vis_save_dir = osp.join(output, 'demo_img')
                     if not osp.exists(vis_save_dir):
                     if not osp.exists(vis_save_dir):
                         os.makedirs(vis_save_dir)
                         os.makedirs(vis_save_dir)
 
 
@@ -107,8 +107,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                                             osp.basename(file_name))
                                             osp.basename(file_name))
                         vis_im.save(vis_path)
                         vis_im.save(vis_path)
                         sample_path = osp.join(
                         sample_path = osp.join(
-                            'check_dataset',
-                            os.path.relpath(vis_path, output_dir))
+                            'check_dataset', os.path.relpath(vis_path, output))
                         sample_paths[tag].append(sample_path)
                         sample_paths[tag].append(sample_path)
 
 
                     try:
                     try:
@@ -121,7 +120,7 @@ def check(dataset_dir, output_dir, sample_num=10):
     num_classes = max(labels) + 1
     num_classes = max(labels) + 1
 
 
     attrs = {}
     attrs = {}
-    attrs['label_file'] = osp.relpath(label_file, output_dir)
+    attrs['label_file'] = osp.relpath(label_file, output)
     attrs['num_classes'] = num_classes
     attrs['num_classes'] = num_classes
     attrs['train_samples'] = sample_cnts['train']
     attrs['train_samples'] = sample_cnts['train']
     attrs['train_sample_paths'] = sample_paths['train']
     attrs['train_sample_paths'] = sample_paths['train']

+ 2 - 2
paddlex/modules/image_classification/evaluator.py

@@ -14,12 +14,12 @@
 
 
 
 
 from ..base import BaseEvaluator
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class ClsEvaluator(BaseEvaluator):
 class ClsEvaluator(BaseEvaluator):
     """ Image Classification Model Evaluator """
     """ Image Classification Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def update_config(self):
     def update_config(self):
         """update evalution config
         """update evalution config

+ 1 - 1
paddlex/modules/image_classification/support_models.py → paddlex/modules/image_classification/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 # limitations under the License.
 
 
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'CLIP_vit_base_patch16_224',
     'CLIP_vit_base_patch16_224',
     'CLIP_vit_large_patch14_224',
     'CLIP_vit_large_patch14_224',
     'ConvNeXt_tiny',
     'ConvNeXt_tiny',

+ 3 - 3
paddlex/modules/image_classification/predictor/predictor.py

@@ -24,12 +24,12 @@ from .keys import ClsKeys as K
 from .utils import InnerConfig
 from .utils import InnerConfig
 from ....utils import logging
 from ....utils import logging
 from . import transforms as T
 from . import transforms as T
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class ClsPredictor(BasePredictor):
 class ClsPredictor(BasePredictor):
     """ Clssification Predictor """
     """ Clssification Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def load_other_src(self):
     def load_other_src(self):
         """ load the inner config file """
         """ load the inner config file """
@@ -77,7 +77,7 @@ class ClsPredictor(BasePredictor):
         """ get postprocess transforms """
         """ get postprocess transforms """
         post_transforms = self.other_src.post_transforms
         post_transforms = self.other_src.post_transforms
         post_transforms.extend([
         post_transforms.extend([
-            T.PrintResult(), T.SaveClsResults(self.output_dir,
+            T.PrintResult(), T.SaveClsResults(self.output,
                                               self.other_src.labels)
                                               self.other_src.labels)
         ])
         ])
         return post_transforms
         return post_transforms

+ 1 - 1
paddlex/modules/image_classification/predictor/transforms.py

@@ -166,7 +166,7 @@ class SaveClsResults(BaseTransform):
         pred = data[K.CLS_PRED]
         pred = data[K.CLS_PRED]
         index = pred.argsort(axis=0)[-1].astype("int32")
         index = pred.argsort(axis=0)[-1].astype("int32")
         score = pred[index].item()
         score = pred[index].item()
-        label = self.class_id_map[int(index)]
+        label = self.class_id_map[int(index)] if self.class_id_map else ""
         label_str = f"{label} {score:.2f}"
         label_str = f"{label} {score:.2f}"
         file_name = os.path.basename(ori_path)
         file_name = os.path.basename(ori_path)
         save_path = os.path.join(self.save_dir, file_name)
         save_path = os.path.join(self.save_dir, file_name)

+ 5 - 2
paddlex/modules/image_classification/predictor/utils.py

@@ -73,13 +73,16 @@ class InnerConfig(object):
     @property
     @property
     def post_transforms(self):
     def post_transforms(self):
         """ read postprocess transforms from config file """
         """ read postprocess transforms from config file """
+        IGNORE_OPS = ['main_indicator', 'SavePreLabel']
         tfs_cfg = self.inner_cfg['PostProcess']
         tfs_cfg = self.inner_cfg['PostProcess']
         tfs = []
         tfs = []
         for tf_key in tfs_cfg:
         for tf_key in tfs_cfg:
             if tf_key == 'Topk':
             if tf_key == 'Topk':
                 tf = T.Topk(
                 tf = T.Topk(
                     topk=tfs_cfg['Topk']['topk'],
                     topk=tfs_cfg['Topk']['topk'],
-                    class_ids=tfs_cfg['Topk']['label_list'])
+                    class_ids=tfs_cfg['Topk'].get('label_list', None))
+            elif tf_key in IGNORE_OPS:
+                continue
             else:
             else:
                 raise RuntimeError(f"Unsupported type: {tf_key}")
                 raise RuntimeError(f"Unsupported type: {tf_key}")
             tfs.append(tf)
             tfs.append(tf)
@@ -88,4 +91,4 @@ class InnerConfig(object):
     @property
     @property
     def labels(self):
     def labels(self):
         """ the labels in inner config """
         """ the labels in inner config """
-        return self.inner_cfg["PostProcess"]["Topk"]["label_list"]
+        return self.inner_cfg['PostProcess']['Topk'].get('label_list', None)

+ 2 - 2
paddlex/modules/image_classification/trainer.py

@@ -19,13 +19,13 @@ import paddle
 from pathlib import Path
 from pathlib import Path
 
 
 from ..base import BaseTrainer, BaseTrainDeamon
 from ..base import BaseTrainer, BaseTrainDeamon
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 from ...utils.config import AttrDict
 from ...utils.config import AttrDict
 
 
 
 
 class ClsTrainer(BaseTrainer):
 class ClsTrainer(BaseTrainer):
     """ Image Classification Model Trainer """
     """ Image Classification Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def dump_label_dict(self, src_label_dict_path: str):
     def dump_label_dict(self, src_label_dict_path: str):
         """dump label dict config
         """dump label dict config

+ 4 - 4
paddlex/modules/instance_segmentation/dataset_checker/__init__.py

@@ -18,13 +18,13 @@ import os
 from .dataset_src import check, convert, split_dataset, deep_analyse
 from .dataset_src import check, convert, split_dataset, deep_analyse
 from ...base import BaseDatasetChecker
 from ...base import BaseDatasetChecker
 
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class COCOInstSegDatasetChecker(BaseDatasetChecker):
 class COCOInstSegDatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Instance Segmentation Model
     """Dataset Checker for Instance Segmentation Model
     """
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
     sample_num = 10
 
 
     def convert_dataset(self, src_dataset_dir: str) -> str:
     def convert_dataset(self, src_dataset_dir: str) -> str:
@@ -62,7 +62,7 @@ class COCOInstSegDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: dataset summary.
             dict: dataset summary.
         """
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
 
     def analyse(self, dataset_dir: str) -> dict:
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
         """deep analyse dataset
@@ -73,7 +73,7 @@ class COCOInstSegDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: the deep analysis results.
             dict: the deep analysis results.
         """
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
 
     def get_show_type(self) -> str:
     def get_show_type(self) -> str:
         """get the show type of dataset
         """get the show type of dataset

+ 2 - 2
paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py

@@ -27,7 +27,7 @@ from pycocotools.coco import COCO
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
 
 
-def deep_analyse(dataset_dir, output_dir):
+def deep_analyse(dataset_dir, output):
     """class analysis for dataset"""
     """class analysis for dataset"""
     tags = ['train', 'val']
     tags = ['train', 'val']
     all_instances = 0
     all_instances = 0
@@ -76,6 +76,6 @@ def deep_analyse(dataset_dir, output_dir):
     ax.set_ylabel('Counts')
     ax.set_ylabel('Counts')
     plt.legend()
     plt.legend()
     fig.tight_layout()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 3
paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py

@@ -27,7 +27,7 @@ from .....utils.errors import DatasetFileNotFoundError
 from .....utils.logging import info
 from .....utils.logging import info
 
 
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     """ check dataset """
     info(dataset_dir)
     info(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
@@ -58,7 +58,7 @@ def check(dataset_dir, output_dir, sample_num=10):
             coco = COCO(file_list)
             coco = COCO(file_list)
             num_class = len(coco.getCatIds())
             num_class = len(coco.getCatIds())
 
 
-            vis_save_dir = osp.join(output_dir, 'demo_img')
+            vis_save_dir = osp.join(output, 'demo_img')
 
 
             image_info = jsondata['images']
             image_info = jsondata['images']
             for i in range(sample_num):
             for i in range(sample_num):
@@ -75,7 +75,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 vis_im.save(vis_path)
                 vis_im.save(vis_path)
                 sample_path = osp.join('check_dataset',
                 sample_path = osp.join('check_dataset',
-                                       os.path.relpath(vis_path, output_dir))
+                                       os.path.relpath(vis_path, output))
                 sample_paths[tag].append(sample_path)
                 sample_paths[tag].append(sample_path)
 
 
     attrs = {}
     attrs = {}

+ 2 - 2
paddlex/modules/instance_segmentation/evaluator.py

@@ -14,12 +14,12 @@
 
 
 
 
 from ..object_detection import DetEvaluator
 from ..object_detection import DetEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class InstanceSegEvaluator(DetEvaluator):
 class InstanceSegEvaluator(DetEvaluator):
     """ Instance Segmentation Model Evaluator """
     """ Instance Segmentation Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def update_config(self):
     def update_config(self):
         """update evalution config
         """update evalution config

+ 1 - 1
paddlex/modules/instance_segmentation/support_models.py → paddlex/modules/instance_segmentation/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 # limitations under the License.
 
 
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'Mask-RT-DETR-H',
     'Mask-RT-DETR-H',
     'Mask-RT-DETR-L',
     'Mask-RT-DETR-L',
 ]
 ]

+ 2 - 2
paddlex/modules/instance_segmentation/predictor/predictor.py

@@ -16,12 +16,12 @@
 import numpy as np
 import numpy as np
 from ...object_detection import DetPredictor
 from ...object_detection import DetPredictor
 from .keys import InstanceSegKeys as K
 from .keys import InstanceSegKeys as K
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class InstanceSegPredictor(DetPredictor):
 class InstanceSegPredictor(DetPredictor):
     """ Instance Seg Predictor """
     """ Instance Seg Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def _run(self, batch_input):
     def _run(self, batch_input):
         """ run """
         """ run """

+ 2 - 2
paddlex/modules/instance_segmentation/trainer.py

@@ -14,12 +14,12 @@
 
 
 
 
 from ..object_detection import DetTrainer
 from ..object_detection import DetTrainer
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class InstanceSegTrainer(DetTrainer):
 class InstanceSegTrainer(DetTrainer):
     """ Instance Segmentation Model Trainer """
     """ Instance Segmentation Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def _update_dataset(self):
     def _update_dataset(self):
         """update dataset settings
         """update dataset settings

+ 4 - 4
paddlex/modules/object_detection/dataset_checker/__init__.py

@@ -25,13 +25,13 @@ from pycocotools.coco import COCO
 from ...base import BaseDatasetChecker
 from ...base import BaseDatasetChecker
 from .dataset_src import check, convert, split_dataset, deep_analyse
 from .dataset_src import check, convert, split_dataset, deep_analyse
 
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class COCODatasetChecker(BaseDatasetChecker):
 class COCODatasetChecker(BaseDatasetChecker):
     """Dataset Checker for Object Detection Model
     """Dataset Checker for Object Detection Model
     """
     """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
     sample_num = 10
 
 
     def get_dataset_root(self, dataset_dir: str) -> str:
     def get_dataset_root(self, dataset_dir: str) -> str:
@@ -83,7 +83,7 @@ class COCODatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: dataset summary.
             dict: dataset summary.
         """
         """
-        return check(dataset_dir, self.output_dir)
+        return check(dataset_dir, self.output)
 
 
     def analyse(self, dataset_dir: str) -> dict:
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
         """deep analyse dataset
@@ -94,7 +94,7 @@ class COCODatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: the deep analysis results.
             dict: the deep analysis results.
         """
         """
-        return deep_analyse(dataset_dir, self.output_dir)
+        return deep_analyse(dataset_dir, self.output)
 
 
     def get_show_type(self) -> str:
     def get_show_type(self) -> str:
         """get the show type of dataset
         """get the show type of dataset

+ 2 - 2
paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py

@@ -29,7 +29,7 @@ from pycocotools.coco import COCO
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 from .....utils.fonts import PINGFANG_FONT_FILE_PATH
 
 
 
 
-def deep_analyse(dataset_dir, output_dir):
+def deep_analyse(dataset_dir, output):
     """class analysis for dataset"""
     """class analysis for dataset"""
     tags = ['train', 'val']
     tags = ['train', 'val']
     all_instances = 0
     all_instances = 0
@@ -78,6 +78,6 @@ def deep_analyse(dataset_dir, output_dir):
     ax.set_ylabel('Counts')
     ax.set_ylabel('Counts')
     plt.legend()
     plt.legend()
     fig.tight_layout()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 3
paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py

@@ -25,7 +25,7 @@ from .....utils.errors import DatasetFileNotFoundError
 from .utils.visualizer import draw_bbox
 from .utils.visualizer import draw_bbox
 
 
 
 
-def check(dataset_dir, output_dir, sample_num=10):
+def check(dataset_dir, output, sample_num=10):
     """ check dataset """
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
@@ -53,7 +53,7 @@ def check(dataset_dir, output_dir, sample_num=10):
             coco = COCO(file_list)
             coco = COCO(file_list)
             num_class = len(coco.getCatIds())
             num_class = len(coco.getCatIds())
 
 
-            vis_save_dir = osp.join(output_dir, 'demo_img')
+            vis_save_dir = osp.join(output, 'demo_img')
 
 
             image_info = jsondata['images']
             image_info = jsondata['images']
             sample_cnts[tag] = len(image_info)
             sample_cnts[tag] = len(image_info)
@@ -71,7 +71,7 @@ def check(dataset_dir, output_dir, sample_num=10):
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 Path(vis_path).parent.mkdir(parents=True, exist_ok=True)
                 vis_im.save(vis_path)
                 vis_im.save(vis_path)
                 sample_path = osp.join('check_dataset',
                 sample_path = osp.join('check_dataset',
-                                       os.path.relpath(vis_path, output_dir))
+                                       os.path.relpath(vis_path, output))
                 sample_paths[tag].append(sample_path)
                 sample_paths[tag].append(sample_path)
 
 
     attrs = {}
     attrs = {}

+ 4 - 4
paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py

@@ -245,7 +245,7 @@ def convert_voc_dataset(root_dir, anno_map):
             annotation_paths=ann_paths,
             annotation_paths=ann_paths,
             label_indexer=label_indexer,
             label_indexer=label_indexer,
             img_indexer=img_indexer,
             img_indexer=img_indexer,
-            output_dir=annotations_dir,
+            output=annotations_dir,
             output_file=dst_anno)
             output_file=dst_anno)
 
 
 
 
@@ -354,7 +354,7 @@ def voc_get_coco_annotation(obj, label_indexer):
 
 
 
 
 def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
 def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
-                         output_dir, output_file):
+                         output, output_file):
     """
     """
     Convert VOC format data to COCO format.
     Convert VOC format data to COCO format.
     
     
@@ -362,7 +362,7 @@ def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
         annotation_paths (list): A list of paths to the XML files.
         annotation_paths (list): A list of paths to the XML files.
         label_indexer: indexer to get category id by label name.
         label_indexer: indexer to get category id by label name.
         img_indexer: indexer to get image id by filename.
         img_indexer: indexer to get image id by filename.
-        output_dir (str): The directory to save output JSON file.
+        output (str): The directory to save output JSON file.
         output_file (str): Output JSON file name.
         output_file (str): Output JSON file name.
     
     
     Returns:
     Returns:
@@ -425,6 +425,6 @@ def voc_xmls_to_cocojson(root_dir, annotation_paths, label_indexer, img_indexer,
             bnd_id = bnd_id + 1
             bnd_id = bnd_id + 1
 
 
     output_json_dict['categories'] = label_indexer.get_list(key_name="name")
     output_json_dict['categories'] = label_indexer.get_list(key_name="name")
-    output_file = os.path.join(output_dir, output_file)
+    output_file = os.path.join(output, output_file)
     write_json_file(output_json_dict, output_file)
     write_json_file(output_json_dict, output_file)
     info(f"The converted annotations has been save to {output_file}.")
     info(f"The converted annotations has been save to {output_file}.")

+ 2 - 2
paddlex/modules/object_detection/evaluator.py

@@ -14,12 +14,12 @@
 
 
 
 
 from ..base import BaseEvaluator
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class DetEvaluator(BaseEvaluator):
 class DetEvaluator(BaseEvaluator):
     """ Object Detection Model Evaluator """
     """ Object Detection Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def update_config(self):
     def update_config(self):
         """update evalution config
         """update evalution config

+ 1 - 1
paddlex/modules/object_detection/support_models.py → paddlex/modules/object_detection/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 # limitations under the License.
 
 
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'PicoDet-L',
     'PicoDet-L',
     'PicoDet-S',
     'PicoDet-S',
     'PP-YOLOE_plus-L',
     'PP-YOLOE_plus-L',

+ 3 - 3
paddlex/modules/object_detection/predictor/predictor.py

@@ -24,12 +24,12 @@ from ...base.predictor.transforms import image_common
 from . import transforms as T
 from . import transforms as T
 from .keys import DetKeys as K
 from .keys import DetKeys as K
 from .utils import InnerConfig
 from .utils import InnerConfig
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class DetPredictor(BasePredictor):
 class DetPredictor(BasePredictor):
     """ Detection Predictor """
     """ Detection Predictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def load_other_src(self):
     def load_other_src(self):
         """ load the inner config file """
         """ load the inner config file """
@@ -91,6 +91,6 @@ class DetPredictor(BasePredictor):
         """ get postprocess transforms """
         """ get postprocess transforms """
         return [
         return [
             T.SaveDetResults(
             T.SaveDetResults(
-                save_dir=self.output_dir, labels=self.other_src.labels),
+                save_dir=self.output, labels=self.other_src.labels),
             T.PrintResult()
             T.PrintResult()
         ]
         ]

+ 2 - 2
paddlex/modules/object_detection/trainer.py

@@ -19,12 +19,12 @@ import paddle
 from ..base import BaseTrainer, BaseTrainDeamon
 from ..base import BaseTrainer, BaseTrainDeamon
 from ...utils.config import AttrDict
 from ...utils.config import AttrDict
 from ...utils import logging
 from ...utils import logging
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class DetTrainer(BaseTrainer):
 class DetTrainer(BaseTrainer):
     """ Object Detection Model Trainer """
     """ Object Detection Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def build_deamon(self, config: AttrDict) -> "DetTrainDeamon":
     def build_deamon(self, config: AttrDict) -> "DetTrainDeamon":
         """build deamon thread for saving training outputs timely
         """build deamon thread for saving training outputs timely

+ 4 - 4
paddlex/modules/semantic_segmentation/dataset_checker/__init__.py

@@ -19,12 +19,12 @@ import os.path as osp
 from ...base import BaseDatasetChecker
 from ...base import BaseDatasetChecker
 from .dataset_src import check_dataset, convert_dataset, split_dataset, anaylse_dataset
 from .dataset_src import check_dataset, convert_dataset, split_dataset, anaylse_dataset
 
 
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class SegDatasetChecker(BaseDatasetChecker):
 class SegDatasetChecker(BaseDatasetChecker):
     """ Dataset Checker for Semantic Segmentation Model """
     """ Dataset Checker for Semantic Segmentation Model """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
     sample_num = 10
     sample_num = 10
 
 
     def convert_dataset(self, src_dataset_dir: str) -> str:
     def convert_dataset(self, src_dataset_dir: str) -> str:
@@ -62,7 +62,7 @@ class SegDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: dataset summary.
             dict: dataset summary.
         """
         """
-        return check_dataset(dataset_dir, self.output_dir, sample_num)
+        return check_dataset(dataset_dir, self.output, sample_num)
 
 
     def analyse(self, dataset_dir: str) -> dict:
     def analyse(self, dataset_dir: str) -> dict:
         """deep analyse dataset
         """deep analyse dataset
@@ -73,7 +73,7 @@ class SegDatasetChecker(BaseDatasetChecker):
         Returns:
         Returns:
             dict: the deep analysis results.
             dict: the deep analysis results.
         """
         """
-        return anaylse_dataset(dataset_dir, self.output_dir)
+        return anaylse_dataset(dataset_dir, self.output)
 
 
     def get_show_type(self) -> str:
     def get_show_type(self) -> str:
         """get the show type of dataset
         """get the show type of dataset

+ 2 - 2
paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py

@@ -25,7 +25,7 @@ from .....utils.file_interface import custom_open
 from .....utils.logging import info
 from .....utils.logging import info
 
 
 
 
-def anaylse_dataset(dataset_dir, output_dir):
+def anaylse_dataset(dataset_dir, output):
     """class analysis for dataset"""
     """class analysis for dataset"""
 
 
     split_tags = ["train", "val"]
     split_tags = ["train", "val"]
@@ -71,6 +71,6 @@ def anaylse_dataset(dataset_dir, output_dir):
     ax.set_ylabel('Sample Counts')
     ax.set_ylabel('Sample Counts')
     plt.legend()
     plt.legend()
     fig.tight_layout()
     fig.tight_layout()
-    fig_path = os.path.join(output_dir, "histogram.png")
+    fig_path = os.path.join(output, "histogram.png")
     fig.savefig(fig_path)
     fig.savefig(fig_path)
     return {"histogram": os.path.join("check_dataset", "histogram.png")}
     return {"histogram": os.path.join("check_dataset", "histogram.png")}

+ 3 - 4
paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py

@@ -26,12 +26,12 @@ from .....utils.file_interface import custom_open
 from .....utils.logging import info
 from .....utils.logging import info
 
 
 
 
-def check_dataset(dataset_dir, output_dir, sample_num=10):
+def check_dataset(dataset_dir, output, sample_num=10):
     """ check dataset """
     """ check dataset """
     dataset_dir = osp.abspath(dataset_dir)
     dataset_dir = osp.abspath(dataset_dir)
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
     if not osp.exists(dataset_dir) or not osp.isdir(dataset_dir):
         raise DatasetFileNotFoundError(file_path=dataset_dir)
         raise DatasetFileNotFoundError(file_path=dataset_dir)
-    vis_save_dir = osp.join(output_dir, 'demo_img')
+    vis_save_dir = osp.join(output, 'demo_img')
     if not osp.exists(vis_save_dir):
     if not osp.exists(vis_save_dir):
         os.makedirs(vis_save_dir)
         os.makedirs(vis_save_dir)
     split_tags = ["train", "val"]
     split_tags = ["train", "val"]
@@ -67,8 +67,7 @@ def check_dataset(dataset_dir, output_dir, sample_num=10):
                                              osp.basename(img_file))
                                              osp.basename(img_file))
                     vis_img.save(vis_save_path)
                     vis_img.save(vis_save_path)
                     vis_save_path = osp.join(
                     vis_save_path = osp.join(
-                        'check_dataset',
-                        os.path.relpath(vis_save_path, output_dir))
+                        'check_dataset', os.path.relpath(vis_save_path, output))
                     if f"{tag}_sample_paths" not in attrs:
                     if f"{tag}_sample_paths" not in attrs:
                         attrs[f"{tag}_sample_paths"] = [vis_save_path]
                         attrs[f"{tag}_sample_paths"] = [vis_save_path]
                     else:
                     else:

+ 2 - 2
paddlex/modules/semantic_segmentation/evaluator.py

@@ -15,12 +15,12 @@
 
 
 from pathlib import Path
 from pathlib import Path
 from ..base import BaseEvaluator
 from ..base import BaseEvaluator
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class SegEvaluator(BaseEvaluator):
 class SegEvaluator(BaseEvaluator):
     """ Semantic Segmentation Model Evaluator """
     """ Semantic Segmentation Model Evaluator """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def update_config(self):
     def update_config(self):
         """update evalution config
         """update evalution config

+ 1 - 1
paddlex/modules/semantic_segmentation/support_models.py → paddlex/modules/semantic_segmentation/model_list.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 # limitations under the License.
 
 
 
 
-SUPPORT_MODELS = [
+MODELS = [
     'Deeplabv3_Plus-R101',
     'Deeplabv3_Plus-R101',
     'Deeplabv3_Plus-R50',
     'Deeplabv3_Plus-R50',
     'Deeplabv3-R101',
     'Deeplabv3-R101',

+ 5 - 6
paddlex/modules/semantic_segmentation/predictor/predictor.py

@@ -23,24 +23,24 @@ from ...base import BasePredictor
 from .keys import SegKeys as K
 from .keys import SegKeys as K
 from . import transforms as T
 from . import transforms as T
 from .utils import InnerConfig
 from .utils import InnerConfig
-from ..support_models import SUPPORT_MODELS
+from ..model_list import MODELS
 
 
 
 
 class SegPredictor(BasePredictor):
 class SegPredictor(BasePredictor):
     """ SegPredictor """
     """ SegPredictor """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def __init__(self,
     def __init__(self,
                  model_dir,
                  model_dir,
                  kernel_option,
                  kernel_option,
-                 output_dir,
+                 output,
                  pre_transforms=None,
                  pre_transforms=None,
                  post_transforms=None,
                  post_transforms=None,
                  has_prob_map=False):
                  has_prob_map=False):
         super().__init__(
         super().__init__(
             model_dir=model_dir,
             model_dir=model_dir,
             kernel_option=kernel_option,
             kernel_option=kernel_option,
-            output_dir=output_dir,
+            output=output,
             pre_transforms=pre_transforms,
             pre_transforms=pre_transforms,
             post_transforms=post_transforms)
             post_transforms=post_transforms)
         self.has_prob_map = has_prob_map
         self.has_prob_map = has_prob_map
@@ -104,6 +104,5 @@ class SegPredictor(BasePredictor):
     def _get_post_transforms_from_config(self):
     def _get_post_transforms_from_config(self):
         """ _get_post_transforms_from_config """
         """ _get_post_transforms_from_config """
         return [
         return [
-            T.GeneratePCMap(), T.SaveSegResults(self.output_dir),
-            T.PrintResult()
+            T.GeneratePCMap(), T.SaveSegResults(self.output), T.PrintResult()
         ]
         ]

+ 2 - 2
paddlex/modules/semantic_segmentation/trainer.py

@@ -20,12 +20,12 @@ import paddle
 
 
 from ..base import BaseTrainer, BaseTrainDeamon
 from ..base import BaseTrainer, BaseTrainDeamon
 from ...utils.config import AttrDict
 from ...utils.config import AttrDict
-from .support_models import SUPPORT_MODELS
+from .model_list import MODELS
 
 
 
 
 class SegTrainer(BaseTrainer):
 class SegTrainer(BaseTrainer):
     """ Semantic Segmentation Model Trainer """
     """ Semantic Segmentation Model Trainer """
-    support_models = SUPPORT_MODELS
+    entities = MODELS
 
 
     def build_deamon(self, config: AttrDict) -> "SegTrainDeamon":
     def build_deamon(self, config: AttrDict) -> "SegTrainDeamon":
         """build deamon thread for saving training outputs timely
         """build deamon thread for saving training outputs timely

Unele fișiere nu au fost afișate deoarece prea multe fișiere au fost modificate în acest diff