
Merge pull request #211 from PaddlePaddle/jason

interpret: fix interpretability bugs
Jason, 5 years ago · parent commit cde623669c
32 changed files with 418 additions and 113 deletions
  1. docs/data/format/classification.md (+11 -2)
  2. docs/data/format/detection.md (+15 -2)
  3. docs/data/format/instance_segmentation.md (+15 -2)
  4. docs/data/format/segmentation.md (+15 -3)
  5. docs/quick_start.md (+1 -1)
  6. docs/train/instance_segmentation.md (+4 -4)
  7. docs/train/object_detection.md (+7 -7)
  8. docs/train/prediction.md (+18 -2)
  9. docs/train/semantic_segmentation.md (+9 -9)
  10. paddlex/cv/models/hrnet.py (+1 -1)
  11. paddlex/interpret/interpretation_predict.py (+12 -4)
  12. paddlex/interpret/visualize.py (+14 -5)
  13. tutorials/train/image_classification/alexnet.py (+4 -3)
  14. tutorials/train/image_classification/mobilenetv2.py (+4 -2)
  15. tutorials/train/image_classification/mobilenetv3_small_ssld.py (+4 -2)
  16. tutorials/train/image_classification/resnet50_vd_ssld.py (+4 -2)
  17. tutorials/train/image_classification/shufflenetv2.py (+4 -2)
  18. tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py (+5 -5)
  19. tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py (+54 -0)
  20. tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py (+8 -8)
  21. tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py (+7 -7)
  22. tutorials/train/object_detection/faster_rcnn_r18_fpn.py (+51 -0)
  23. tutorials/train/object_detection/faster_rcnn_r50_fpn.py (+6 -6)
  24. tutorials/train/object_detection/yolov3_darknet53.py (+7 -9)
  25. tutorials/train/object_detection/yolov3_mobilenetv1.py (+2 -4)
  26. tutorials/train/object_detection/yolov3_mobilenetv3.py (+7 -9)
  27. tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py (+7 -4)
  28. tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py (+53 -0)
  29. tutorials/train/semantic_segmentation/deeplabv3p_xception65.py (+53 -0)
  30. tutorials/train/semantic_segmentation/fast_scnn.py (+6 -3)
  31. tutorials/train/semantic_segmentation/hrnet.py (+6 -3)
  32. tutorials/train/semantic_segmentation/unet.py (+4 -2)

+ 11 - 2
docs/data/format/classification.md

@@ -24,10 +24,13 @@ MyDataset/ # image classification dataset root directory
 ## Splitting the training and validation sets
 
**For training, we need to prepare three files under the `MyDataset` directory: `train_list.txt`, `val_list.txt`, and `labels.txt`**, which list the training set, the validation set, and the class labels respectively. [Click to download the image classification sample dataset](https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz)
+
+<!--
> Note: You can also use PaddleX's built-in tool to randomly split a dataset. **After the dataset is organized in the format above**, the following command performs the split quickly, where `split` specifies the proportion of the training set and the remaining proportion goes to the validation set.
 > ```
 > paddlex --split_dataset --from ImageNet --split 0.8 --save_dir ./splited_dataset_dir
 > ```
+-->
 
 **labels.txt**  
 
@@ -60,8 +63,14 @@ val_list lists the images used for validation and their corresponding class ids, in the format
 ```
 import paddlex as pdx
 from paddlex.cls import transforms
-train_transforms = transforms.ComposedClsTransforms(mode='train', crop_size=[224, 224])
-eval_transforms = transforms.ComposedClsTransforms(mode='eval', crop_size=[224, 224])
+train_transforms = transforms.Compose([
+    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.Normalize()
+])
+eval_transforms = transforms.Compose([
+    transforms.ResizeByShort(short_size=256),
+    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+])
 train_dataset = pdx.datasets.ImageNet(
                     data_dir='./MyDataset',
                     file_list='./MyDataset/train_list.txt',
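
Since the built-in `paddlex --split_dataset` note is commented out above, a manual split can be scripted; below is a minimal sketch (an assumed helper, not PaddleX code) for an ImageNet-style layout with one sub-directory per class, using the same 80/20 ratio:

```
import os
import random

# Write labels.txt, train_list.txt and val_list.txt for ./MyDataset
random.seed(0)
dataset_dir = './MyDataset'
classes = sorted(
    d for d in os.listdir(dataset_dir)
    if os.path.isdir(os.path.join(dataset_dir, d)))
train_lines, val_lines = [], []
for label_id, cls in enumerate(classes):
    images = sorted(os.listdir(os.path.join(dataset_dir, cls)))
    random.shuffle(images)
    split = int(len(images) * 0.8)  # 80% train, 20% val
    train_lines += ['{}/{} {}'.format(cls, img, label_id) for img in images[:split]]
    val_lines += ['{}/{} {}'.format(cls, img, label_id) for img in images[split:]]
with open(os.path.join(dataset_dir, 'labels.txt'), 'w') as f:
    f.write('\n'.join(classes) + '\n')
with open(os.path.join(dataset_dir, 'train_list.txt'), 'w') as f:
    f.write('\n'.join(train_lines) + '\n')
with open(os.path.join(dataset_dir, 'val_list.txt'), 'w') as f:
    f.write('\n'.join(val_lines) + '\n')
```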

+ 15 - 2
docs/data/format/detection.md

@@ -21,10 +21,13 @@ MyDataset/ # object detection dataset root directory
 ## Splitting the training and validation sets
 
**For training, we need to prepare three files under the `MyDataset` directory: `train_list.txt`, `val_list.txt`, and `labels.txt`**, which list the training set, the validation set, and the class labels respectively. [Click to download the object detection sample dataset](https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz)
+
+<!--
> Note: You can also use PaddleX's built-in tool to randomly split a dataset. **After the dataset is organized in the format above**, the following command performs the split quickly, where `split` specifies the proportion of the training set and the remaining proportion goes to the validation set.
 > ```
 > paddlex --split_dataset --from PascalVOC --pics ./JPEGImages --annotations ./Annotations --split 0.8 --save_dir ./splited_dataset_dir
 > ```
+-->
 
 **labels.txt**  
 
@@ -56,8 +59,18 @@ val_list lists the images used for validation and their corresponding annotation files, in the format
 import paddlex as pdx
 from paddlex.det import transforms
 
-train_transforms = transforms.ComposedYOLOv3Transforms(mode='train', shape=[608, 608])
-eval_transforms = transforms.ComposedYOLOv3Transforms(mode='eval', shape=[608, 608])
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(),
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32)
+])
+
+eval_transforms = transforms.Compose([
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32),
+])
 
 train_dataset = pdx.datasets.VOCDetection(
                         data_dir='./MyDataset',

+ 15 - 2
docs/data/format/instance_segmentation.md

@@ -17,10 +17,13 @@ MyDataset/ # instance segmentation dataset root directory
 ## Splitting the training and validation sets
 
In PaddleX, the training and validation sets are distinguished by separate json files placed at the same level as `MyDataset`, e.g. `train.json` and `val.json`. [Click to download the instance segmentation sample dataset](https://bj.bcebos.com/paddlex/datasets/garbage_ins_det.tar.gz).
+
+<!--
> Note: You can also use PaddleX's built-in tool to randomly split a dataset. After the data is organized as shown above, the following command performs the split quickly, where `split` specifies the proportion of the training set and the remaining proportion goes to the validation set.
 > ```
 > paddlex --split_dataset --from MSCOCO --pics ./JPEGImages --annotations ./annotations.json --split 0.8 --save_dir ./splited_dataset_dir
 > ```
+-->
 
 MSCOCO annotation files use the json format; data can be labeled with tools such as Labelme, 精灵标注助手, or EasyData. See [data annotation tools](../annotations.md)
 
@@ -30,8 +33,18 @@ MSCOCO annotation files use the json format; data can be labeled with tools such as Labelme
 import paddlex as pdx
 from paddlex.det import transforms
 
-train_transforms = transforms.ComposedRCNNTransforms(mode='train', min_max_size=[800, 1333])
-eval_transforms = transforms.ComposedRCNNTransforms(mode='eval', min_max_size=[800, 1333])
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(),
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32)
+])
+
+eval_transforms = transforms.Compose([
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32),
+])
 
 train_dataset = pdx.datasets.CocoDetection(
                     data_dir='./MyDataset/JPEGImages',

+ 15 - 3
docs/data/format/segmentation.md

@@ -22,10 +22,13 @@ MyDataset/ # semantic segmentation dataset root directory
 ## Splitting the training and validation sets
 
**For training, we need to prepare three files under the `MyDataset` directory: `train_list.txt`, `val_list.txt`, and `labels.txt`**, which list the training set, the validation set, and the class labels respectively. [Click to download the semantic segmentation sample dataset](https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz)
+
+<!--
> Note: You can also use PaddleX's built-in tool to randomly split a dataset. **After the dataset is organized in the format above**, the following command performs the split quickly, where `split` specifies the proportion of the training set and the remaining proportion goes to the validation set.
 > ```
 > paddlex --split_dataset --from Seg --pics ./JPEGImages --annotations ./Annotations --split 0.8 --save_dir ./splited_dataset_dir
 > ```
+-->
 
 **labels.txt**  
 
@@ -58,8 +61,18 @@ val_list lists the images used for validation and their corresponding annotation files, in the format
 import paddlex as pdx
 from paddlex.seg import transforms
 
-train_transforms = transforms.ComposedSegTransforms(mode='train', train_crop_size=[512, 512])
-eval_transforms = transforms.ComposedSegTransforms(mode='eval', train_crop_size=[512, 512])
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(),
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512),
+    transforms.Normalize()
+])
+
+eval_transforms = transforms.Compose([
+    transforms.ResizeByLong(long_size=512),
+    transforms.Padding(target_size=512),
+    transforms.Normalize()
+])
 
 train_dataset = pdx.datasets.SegDataset(
                         data_dir='./MyDataset',
@@ -71,5 +84,4 @@ eval_dataset = pdx.datasets.SegDataset(
                         file_list='./MyDataset/val_list.txt',
                         label_list='MyDataset/labels.txt',
                         transforms=eval_transforms)
-
 ```

+ 1 - 1
docs/quick_start.md

@@ -14,8 +14,8 @@ All model training in PaddleX follows the 3 steps below, allowing training to be completed quickly
 
 Other uses of PaddleX
 
+- <a href="#训练过程使用VisualDL查看训练指标变化">Use VisualDL to monitor metric changes during training</a>
 - <a href="#加载训练保存的模型预测">Load a trained, saved model for prediction</a>
-- [Use VisualDL to monitor metric changes during training]()
 
 
 <a name="安装PaddleX"></a>

+ 4 - 4
docs/train/instance_segmentation.md

@@ -10,9 +10,9 @@ PaddleX currently provides the MaskRCNN instance segmentation architecture with multiple backbones,
 
 | Model (click for code)               | Box MMAP/Seg MMAP | Model size | GPU inference time | Arm inference time | Notes |
 | :----------------  | :------- | :------- | :---------  | :---------  | :-----    |
-| [MaskRCNN-ResNet50-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_r50_fpn.py)   |  -/-   |   136.0MB    |  197.715ms       |   -    | High accuracy, suitable for server-side deployment   |
-| [MaskRCNN-ResNet18-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_r18_fpn.py)   |  -/-   |   -    |  -       |   -    | High accuracy, suitable for server-side deployment   |
-| [MaskRCNN-HRNet-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_hrnet_fpn.py)   |  -/-   |   115.MB    |  81.592ms       |   -    | High accuracy and fast inference, suitable for server-side deployment   |
+| [MaskRCNN-ResNet50-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_r50_fpn.py)   |  36.5%/32.2%   |   170.0MB    |  160.185ms       |   -    | High accuracy, suitable for server-side deployment   |
+| [MaskRCNN-ResNet18-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_r18_fpn.py)   |  -/-   |   120.0MB    |  -       |   -    | High accuracy, suitable for server-side deployment   |
+| [MaskRCNN-HRNet-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/instance_segmentation/mask_hrnet_fpn.py)   |  -/-   |   116MB    |  -       |   -    | High accuracy and fast inference, suitable for server-side deployment   |
 
 
 ## 开始训练
@@ -27,4 +27,4 @@ PaddleX currently provides the MaskRCNN instance segmentation architecture with multiple backbones,
 
 - [**Important**] Tuning training parameters for your own machine and data? First learn what the training parameters in PaddleX do. [——>>link](../appendix/parameters.md)
 - [**Useful**] No machine resources? Train models online with AIStudio's free GPU resources. [——>>link](https://aistudio.baidu.com/aistudio/projectdetail/450925)
-- [**More**] For more image classification models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).
+- [**More**] For more instance segmentation models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).

+ 7 - 7
docs/train/object_detection.md

@@ -10,12 +10,12 @@ PaddleX currently provides two detection architectures, FasterRCNN and YOLOv3, with multiple backbones
 
 | Model (click for code)               | Box MMAP | Model size | GPU inference time | Arm inference time | Notes |
 | :----------------  | :------- | :------- | :---------  | :---------  | :-----    |
-| [YOLOv3-MobileNetV1](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_mobilenetv1.py) |  29.3%  |  99.2MB  |  15.442ms   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
-| [YOLOv3-MobileNetV3](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_mobilenetv3.py)        | 31.6%  | 100.7MB   |  143.322ms  | -  |  Small model with an inference-speed advantage on mobile devices   |
-| [YOLOv3-DarkNet53](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_darknet53.py)     | 38.9  | 249.2MB   | 42.672ms   | -  |  Larger model, fast inference, suitable for server-side deployment   |
-| [FasterRCNN-ResNet50-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/faster_r50_fpn.py)   |  37.2%   |   136.0MB    |  197.715ms       |   -    | High accuracy, suitable for server-side deployment   |
-| [FasterRCNN-ResNet18-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/faster_r18_fpn.py)   |  -   |   -    |  -       |   -    | High accuracy, suitable for server-side deployment   |
-| [FasterRCNN-HRNet-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/faster_hrnet_fpn.py)   |  36.0%   |   115.MB    |  81.592ms       |   -    | High accuracy and fast inference, suitable for server-side deployment   |
+| [YOLOv3-MobileNetV1](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/yolov3_mobilenetv1.py) |  29.3%  |  99.2MB  |  15.442ms   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
+| [YOLOv3-MobileNetV3](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/yolov3_mobilenetv3.py)        | 31.6%  | 100.7MB   |  143.322ms  | -  |  Small model with an inference-speed advantage on mobile devices   |
+| [YOLOv3-DarkNet53](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/yolov3_darknet53.py)     | 38.9%  | 249.2MB   | 42.672ms   | -  |  Larger model, fast inference, suitable for server-side deployment   |
+| [FasterRCNN-ResNet50-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/faster_r50_fpn.py)   |  37.2%   |   136.0MB    |  197.715ms       |   -    | High accuracy, suitable for server-side deployment   |
+| [FasterRCNN-ResNet18-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/faster_r18_fpn.py)   |  -   |   -    |  -       |   -    | High accuracy, suitable for server-side deployment   |
+| [FasterRCNN-HRNet-FPN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/object_detection/faster_hrnet_fpn.py)   |  36.0%   |   115MB    |  81.592ms       |   -    | High accuracy and fast inference, suitable for server-side deployment   |
 
 
 ## 开始训练
@@ -31,4 +31,4 @@ PaddleX currently provides two detection architectures, FasterRCNN and YOLOv3, with multiple backbones
 
 - [**Important**] Tuning training parameters for your own machine and data? First learn what the training parameters in PaddleX do. [——>>link](../appendix/parameters.md)
 - [**Useful**] No machine resources? Train models online with AIStudio's free GPU resources. [——>>link](https://aistudio.baidu.com/aistudio/projectdetail/450925)
-- [**More**] For more image classification models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).
+- [**More**] For more object detection models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).

+ 18 - 2
docs/train/prediction.md

@@ -34,7 +34,23 @@ pdx.seg.visualize(test_jpg, result, weight=0.0, save_dir='./')
 In the example code above, calling `paddlex.seg.visualize` visualizes the semantic segmentation prediction, saving the result under `save_dir`. The `weight` parameter sets the blending weight between the prediction and the original image when they are overlaid: 0.0 shows only the predicted mask, 1.0 shows only the original image.
 
 
+## Download models trained on public datasets
+
+PaddleX provides models trained on several public datasets; you can download them directly and load them as described in this document.
+
+| Type |     Model (click to download)     |     Dataset    |     Size     |     Metric    |    Value    |
+|:--- | :----------  | :-----------  | :----------  | :---------- | :------------- |
+| Image classification | [MobileNetV3_small_ssld](https://bj.bcebos.com/paddlex/models/mobilenetv3_small_ssld_imagenet.tar.gz) | ImageNet | 13MB | Accuracy  |     71.3%        |
+| Image classification | [ResNet50_vd_ssld](https://bj.bcebos.com/paddlex/models/resnet50_vd_ssld_imagenet.tar.gz)  | ImageNet  | 110MB  | Accuracy  |   82.4%       |
+| Object detection | [FasterRCNN-ResNet50-FPN](https://bj.bcebos.com/paddlex/models/faster_r50_fpn_coco.tar.gz) | MSCOCO | 179MB     |    Box MAP  |       37.7%     |
+| Object detection | [YOLOv3-MobileNetV1](https://bj.bcebos.com/paddlex/models/yolov3_mobilenetv1_coco.tar.gz)    | MSCOCO | 106MB      | Box MAP    |      29.3%      |
+| Object detection | [YOLOv3-DarkNet53](https://bj.bcebos.com/paddlex/models/yolov3_darknet53_coco.tar.gz)      | MSCOCO | 266MB      | Box MAP    |      34.8%      |
+| Object detection | [YOLOv3-MobileNetV3](https://bj.bcebos.com/paddlex/models/yolov3_mobilenetv3_coco.tar.gz)      | MSCOCO | 101MB      | Box MAP    |      31.6%      |
+| Instance segmentation | [MaskRCNN-ResNet50-FPN](https://bj.bcebos.com/paddlex/models/mask_r50_fpn_coco.tar.gz)  | MSCOCO | 193MB     | Box MAP/Seg MAP |   38.7% / 34.7%     |
+| Semantic segmentation | [DeepLabv3p-Xception65]()  | Portrait segmentation | xxMB     | mIoU        |      -          |
+| Semantic segmentation | [HRNet_w18_small]()           | Portrait segmentation   | xxMB   | mIoU       |        -           |
+
 PaddleX's `load_model` API covers typical model research needs; for higher-performance inference deployment, refer to the following documents
 
-- [Server-side Python deployment]()  
-- [Server-side C++ deployment]()
+- [Server-side Python deployment](../deploy/server/python.md)  
+- [Server-side C++ deployment](../deploy/server/cpp/index.html)
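
A hedged sketch of using one of the downloads above (paths are placeholders; `faster_r50_fpn_coco` assumes the archive has been unpacked into the working directory):

```
import paddlex as pdx

# Load the pretrained detector and run prediction on a placeholder image
model = pdx.load_model('faster_r50_fpn_coco')
result = model.predict('test.jpg')
# Draw boxes with confidence above 0.5 and save the visualization
pdx.det.visualize('test.jpg', result, threshold=0.5, save_dir='./')
```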

+ 9 - 9
docs/train/semantic_segmentation.md

@@ -4,18 +4,18 @@
 
 PaddleX currently provides four semantic segmentation architectures, DeepLabv3p, UNet, HRNet, and FastSCNN, with multiple backbones to meet developers' needs across scenarios and performance targets.
 
-- **mIOU**: model test accuracy on the COCO dataset
+- **mIOU**: model test accuracy on the Cityscapes dataset
 - **Inference speed**: prediction time per image (excluding pre- and post-processing)
 - "-" indicates the metric has not been updated yet
 
-| Model (click for code)               | Box MMAP | Model size | GPU inference time | Arm inference time | Notes |
+| Model (click for code)               | mIOU | Model size | GPU inference time | Arm inference time | Notes |
 | :----------------  | :------- | :------- | :---------  | :---------  | :-----    |
-| [DeepLabv3p-MobileNetV2-x0.25](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_mobilenetv1.py) |  29.3%  |  99.2MB  |  15.442ms   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
-| [DeepLabv3p-MobileNetV2-x1.0](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_mobilenetv1.py) |  29.3%  |  99.2MB  |  15.442ms   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
-| [DeepLabv3p-Xception65](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_mobilenetv3.py)        | 31.6%  | 100.7MB   |  143.322ms  | -  |  Small model with an inference-speed advantage on mobile devices   |
-| [UNet](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/yolov3_darknet53.py)     | 38.9  | 249.2MB   | 42.672ms   | -  |  Larger model, fast inference, suitable for server-side deployment   |
-| [HRNet](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/faster_r50_fpn.py)   |  37.2%   |   136.0MB    |  197.715ms       |   -    | High accuracy, suitable for server-side deployment   |
-| [FastSCNN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/image_classification/faster_r18_fpn.py)   |  -   |   -    |  -       |   -    | High accuracy, suitable for server-side deployment   |
+| [DeepLabv3p-MobileNetV2-x0.25](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py) |  -  |  2.9MB  |  -   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
+| [DeepLabv3p-MobileNetV2-x1.0](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py) |  69.8%  |  11MB  |  -   | -  |  Small model, fast inference, suitable for low-performance or mobile devices   |
+| [DeepLabv3p-Xception65](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/deeplabv3p_xception65.py)        | 79.3%  | 158MB   |  -  | -  |  Large model, high accuracy, suitable for server-side deployment   |
+| [UNet](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/unet.py)     | -  | 52MB   | -   | -  |  Larger model, high accuracy, suitable for server-side deployment   |
+| [HRNet](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/hrnet.py)   |  79.4%   |   37MB    |  -       |   -    | Smaller model, high accuracy, suitable for server-side deployment   |
+| [FastSCNN](https://github.com/PaddlePaddle/PaddleX/blob/doc/tutorials/train/semantic_segmentation/fast_scnn.py)   |  -   |   4.5MB    |  -       |   -    | Small model, fast inference, suitable for low-performance or mobile devices   |
 
 
 ## 开始训练
@@ -31,4 +31,4 @@ PaddleX currently provides four semantic segmentation architectures: DeepLabv3p, UNet, HRNet, and FastSCNN
 
 - [**Important**] Tuning training parameters for your own machine and data? First learn what the training parameters in PaddleX do. [——>>link](../appendix/parameters.md)
 - [**Useful**] No machine resources? Train models online with AIStudio's free GPU resources. [——>>link](https://aistudio.baidu.com/aistudio/projectdetail/450925)
-- [**More**] For more image classification models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).
+- [**More**] For more semantic segmentation models, see the [PaddleX model zoo](../appendix/model_zoo.md) and the [API documentation](../apis/models/index.html).

+ 1 - 1
paddlex/cv/models/hrnet.py

@@ -25,7 +25,7 @@ class HRNet(DeepLabv3p):
     Args:
         num_classes (int): Number of classes.
         width (int|str): Number of channels of the feature layers in the high-resolution branches. Defaults to 18. Valid values are [18, 30, 32, 40, 44, 48, 60, 64, '18_small_v1'].
-            '18_small_v1' is a lightweight version of 18.
+            '18_small_v1' is a lightweight version of 18; defaults to 18.
         use_bce_loss (bool): Whether to use bce loss as the network's loss function; only usable for two-class segmentation. Can be combined with dice loss. Defaults to False.
         use_dice_loss (bool): Whether to use dice loss as the network's loss function; only usable for two-class segmentation and can be combined with bce loss.
             When both use_bce_loss and use_dice_loss are False, cross-entropy loss is used. Defaults to False.
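
A tiny hedged sketch of the `width` argument documented above (values are illustrative; `pdx.seg.HRNet` is assumed to be the user-facing alias of this class):

```
import paddlex as pdx

# '18_small_v1' selects the lightweight variant of width=18
model = pdx.seg.HRNet(num_classes=2, width='18_small_v1')
```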

+ 12 - 4
paddlex/interpret/interpretation_predict.py

@@ -15,11 +15,17 @@
 import numpy as np
 import cv2
 import copy
+import paddle.fluid as fluid
+from paddlex.cv.transforms import arrange_transforms
 
 
 def interpretation_predict(model, images):
     images = images.astype('float32')
-    model.arrange_transforms(transforms=model.test_transforms, mode='test')
+    arrange_transforms(
+        model.model_type,
+        model.__class__.__name__,
+        transforms=model.test_transforms,
+        mode='test')
     tmp_transforms = copy.deepcopy(model.test_transforms.transforms)
     model.test_transforms.transforms = model.test_transforms.transforms[-2:]
 
@@ -29,9 +35,11 @@ def interpretation_predict(model, images):
         new_imgs.append(model.test_transforms(images[i])[0])
 
     new_imgs = np.array(new_imgs)
-    out = model.exe.run(model.test_prog,
-                        feed={'image': new_imgs},
-                        fetch_list=list(model.interpretation_feats.values()))
+    with fluid.scope_guard(model.scope):
+        out = model.exe.run(
+            model.test_prog,
+            feed={'image': new_imgs},
+            fetch_list=list(model.interpretation_feats.values()))
 
     model.test_transforms.transforms = tmp_transforms
 

+ 14 - 5
paddlex/interpret/visualize.py

@@ -1,11 +1,11 @@
 # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -22,6 +22,7 @@ from .interpretation_predict import interpretation_predict
 from .core.interpretation import Interpretation
 from .core.normlime_base import precompute_global_classifier
 from .core._session_preparation import gen_user_home
+from paddlex.cv.transforms import arrange_transforms
 
 
 def lime(img_file, model, num_samples=3000, batch_size=50, save_dir='./'):
@@ -48,7 +49,11 @@ def lime(img_file, model, num_samples=3000, batch_size=50, save_dir='./'):
             'The interpretation only can deal with the Normal model')
     if not osp.exists(save_dir):
         os.makedirs(save_dir)
-    model.arrange_transforms(transforms=model.test_transforms, mode='test')
+    arrange_transforms(
+        model.model_type,
+        model.__class__.__name__,
+        transforms=model.test_transforms,
+        mode='test')
     tmp_transforms = copy.deepcopy(model.test_transforms)
     tmp_transforms.transforms = tmp_transforms.transforms[:-2]
     img = tmp_transforms(img_file)[0]
@@ -94,7 +99,11 @@ def normlime(img_file,
             'The interpretation only can deal with the Normal model')
     if not osp.exists(save_dir):
         os.makedirs(save_dir)
-    model.arrange_transforms(transforms=model.test_transforms, mode='test')
+    arrange_transforms(
+        model.model_type,
+        model.__class__.__name__,
+        transforms=model.test_transforms,
+        mode='test')
     tmp_transforms = copy.deepcopy(model.test_transforms)
     tmp_transforms.transforms = tmp_transforms.transforms[:-2]
     img = tmp_transforms(img_file)[0]
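
With these fixes in place, the interpretation entry points can be exercised as below; a hedged usage sketch where the model path and image file are placeholders (the `lime` signature follows the definition shown above):

```
import paddlex as pdx

# Load a trained classification model; the path is a placeholder
model = pdx.load_model('output/mobilenetv2/best_model')
# Produce a LIME interpretation for a placeholder image
pdx.interpret.lime('test.jpg', model, num_samples=3000, batch_size=50, save_dir='./')
```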

+ 4 - 3
tutorials/train/image_classification/alexnet.py

@@ -1,4 +1,3 @@
-import os
 from paddlex.cls import transforms
 import paddlex as pdx
 
@@ -8,12 +7,14 @@ pdx.utils.download_and_decompress(veg_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.RandomCrop(crop_size=224), 
+    transforms.RandomHorizontalFlip(),
     transforms.Normalize()
 ])
 eval_transforms = transforms.Compose([
     transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+    transforms.CenterCrop(crop_size=224), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 4 - 2
tutorials/train/image_classification/mobilenetv2.py

@@ -8,12 +8,14 @@ pdx.utils.download_and_decompress(veg_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.RandomCrop(crop_size=224), 
+    transforms.RandomHorizontalFlip(),
     transforms.Normalize()
 ])
 eval_transforms = transforms.Compose([
     transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+    transforms.CenterCrop(crop_size=224), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 4 - 2
tutorials/train/image_classification/mobilenetv3_small_ssld.py

@@ -8,12 +8,14 @@ pdx.utils.download_and_decompress(veg_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.RandomCrop(crop_size=224), 
+    transforms.RandomHorizontalFlip(),
     transforms.Normalize()
 ])
 eval_transforms = transforms.Compose([
     transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+    transforms.CenterCrop(crop_size=224), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 4 - 2
tutorials/train/image_classification/resnet50_vd_ssld.py

@@ -8,12 +8,14 @@ pdx.utils.download_and_decompress(veg_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.RandomCrop(crop_size=224), 
+    transforms.RandomHorizontalFlip(),
     transforms.Normalize()
 ])
 eval_transforms = transforms.Compose([
     transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+    transforms.CenterCrop(crop_size=224), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 4 - 2
tutorials/train/image_classification/shufflenetv2.py

@@ -8,12 +8,14 @@ pdx.utils.download_and_decompress(veg_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomCrop(crop_size=224), transforms.RandomHorizontalFlip(),
+    transforms.RandomCrop(crop_size=224), 
+    transforms.RandomHorizontalFlip(),
     transforms.Normalize()
 ])
 eval_transforms = transforms.Compose([
     transforms.ResizeByShort(short_size=256),
-    transforms.CenterCrop(crop_size=224), transforms.Normalize()
+    transforms.CenterCrop(crop_size=224), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 5 - 5
tutorials/train/instance_segmentation/mask_rcnn_hrnet_fpn.py

@@ -11,15 +11,15 @@ pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32)
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 eval_transforms = transforms.Compose([
     transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
     transforms.Padding(coarsest_stride=32),
 ])
 

+ 54 - 0
tutorials/train/instance_segmentation/mask_rcnn_r18_fpn.py

@@ -0,0 +1,54 @@
+import os
+# Use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+from paddlex.det import transforms
+import paddlex as pdx
+
+# Download and extract the Xiaoduxiong sorting dataset
+xiaoduxiong_dataset = 'https://bj.bcebos.com/paddlex/datasets/xiaoduxiong_ins_det.tar.gz'
+pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./')
+
+# Define the transforms for training and validation
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
+])
+
+eval_transforms = transforms.Compose([
+    transforms.Normalize(), 
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
+])
+
+# Define the datasets used for training and validation
+train_dataset = pdx.datasets.CocoDetection(
+    data_dir='xiaoduxiong_ins_det/JPEGImages',
+    ann_file='xiaoduxiong_ins_det/train.json',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.CocoDetection(
+    data_dir='xiaoduxiong_ins_det/JPEGImages',
+    ann_file='xiaoduxiong_ins_det/val.json',
+    transforms=eval_transforms)
+
+# Initialize the model and start training
+# Training metrics can be viewed with VisualDL
+# Start VisualDL with: visualdl --logdir output/mask_rcnn_r18_fpn/vdl_log --port 8001
+# Then open https://0.0.0.0:8001 in a browser
+# 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
+# num_classes must be the number of classes including background, i.e. object classes + 1
+num_classes = len(train_dataset.labels) + 1
+model = pdx.det.MaskRCNN(num_classes=num_classes, backbone='ResNet18')
+model.train(
+    num_epochs=12,
+    train_dataset=train_dataset,
+    train_batch_size=1,
+    eval_dataset=eval_dataset,
+    learning_rate=0.00125,
+    warmup_steps=10,
+    lr_decay_epochs=[8, 11],
+    save_dir='output/mask_rcnn_r18_fpn',
+    use_vdl=True)

+ 8 - 8
tutorials/train/instance_segmentation/mask_rcnn_r50_fpn.py

@@ -11,16 +11,16 @@ pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32)
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333),
-    transforms.Padding(coarsest_stride=32),
+    transforms.Normalize(), 
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 # Define the datasets used for training and validation
@@ -41,7 +41,7 @@ eval_dataset = pdx.datasets.CocoDetection(
 # 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
 # num_classes must be the number of classes including background, i.e. object classes + 1
 num_classes = len(train_dataset.labels) + 1
-model = pdx.det.MaskRCNN(num_classes=num_classes, backbone='ResNet50_vd')
+model = pdx.det.MaskRCNN(num_classes=num_classes, backbone='ResNet50')
 model.train(
     num_epochs=12,
     train_dataset=train_dataset,

+ 7 - 7
tutorials/train/object_detection/faster_rcnn_hrnet_fpn.py

@@ -11,16 +11,16 @@ pdx.utils.download_and_decompress(insect_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32)
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333),
-    transforms.Padding(coarsest_stride=32),
+    transforms.Normalize(), 
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 # Define the datasets used for training and validation

+ 51 - 0
tutorials/train/object_detection/faster_rcnn_r18_fpn.py

@@ -0,0 +1,51 @@
+import os
+from paddlex.det import transforms
+import paddlex as pdx
+
+# Download and extract the insect detection dataset
+insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
+pdx.utils.download_and_decompress(insect_dataset, path='./')
+
+# Define the transforms for training and validation
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
+])
+
+eval_transforms = transforms.Compose([
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32),
+])
+# Define the datasets used for training and validation
+train_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/train_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/val_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=eval_transforms)
+
+# Initialize the model and start training
+# Training metrics can be viewed with VisualDL
+# Start VisualDL with: visualdl --logdir output/faster_rcnn_r18_fpn/vdl_log --port 8001
+# Then open https://0.0.0.0:8001 in a browser
+# 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
+# num_classes must be the number of classes including background, i.e. object classes + 1
+num_classes = len(train_dataset.labels) + 1
+model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet18')
+model.train(
+    num_epochs=12,
+    train_dataset=train_dataset,
+    train_batch_size=2,
+    eval_dataset=eval_dataset,
+    learning_rate=0.0025,
+    lr_decay_epochs=[8, 11],
+    save_dir='output/faster_rcnn_r18_fpn',
+    use_vdl=True)

+ 6 - 6
tutorials/train/object_detection/faster_rcnn_r50_fpn.py

@@ -8,15 +8,15 @@ pdx.utils.download_and_decompress(insect_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333), transforms.Padding(coarsest_stride=32)
+    transforms.RandomHorizontalFlip(), 
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333), 
+    transforms.Padding(coarsest_stride=32)
 ])
 
 eval_transforms = transforms.Compose([
     transforms.Normalize(),
-    transforms.ResizeByShort(
-        short_size=800, max_size=1333),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
     transforms.Padding(coarsest_stride=32),
 ])
 # Define the datasets used for training and validation
@@ -39,7 +39,7 @@ eval_dataset = pdx.datasets.VOCDetection(
 # 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
 # num_classes must be the number of classes including background, i.e. object classes + 1
 num_classes = len(train_dataset.labels) + 1
-model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet50_vd')
+model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet50')
 model.train(
     num_epochs=12,
     train_dataset=train_dataset,

+ 7 - 9
tutorials/train/object_detection/yolov3_darknet53.py

@@ -8,20 +8,18 @@ pdx.utils.download_and_decompress(insect_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.MixupImage(mixup_epoch=250),
+    transforms.MixupImage(mixup_epoch=250), 
     transforms.RandomDistort(),
-    transforms.RandomExpand(),
-    transforms.RandomCrop(),
-    transforms.Resize(
-        target_size=608, interp='RANDOM'),
+    transforms.RandomExpand(), 
+    transforms.RandomCrop(), 
+    transforms.Resize(target_size=608, interp='RANDOM'), 
     transforms.RandomHorizontalFlip(),
-    transforms.Normalize(),
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.Resize(
-        target_size=608, interp='CUBIC'),
-    transforms.Normalize(),
+    transforms.Resize(target_size=608, interp='CUBIC'), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 2 - 4
tutorials/train/object_detection/yolov3_mobilenetv1.py

@@ -12,15 +12,13 @@ train_transforms = transforms.Compose([
     transforms.RandomDistort(),
     transforms.RandomExpand(),
     transforms.RandomCrop(),
-    transforms.Resize(
-        target_size=608, interp='RANDOM'),
+    transforms.Resize(target_size=608, interp='RANDOM'),
     transforms.RandomHorizontalFlip(),
     transforms.Normalize(),
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.Resize(
-        target_size=608, interp='CUBIC'),
+    transforms.Resize(target_size=608, interp='CUBIC'),
     transforms.Normalize(),
 ])
 

+ 7 - 9
tutorials/train/object_detection/yolov3_mobilenetv3.py

@@ -8,20 +8,18 @@ pdx.utils.download_and_decompress(insect_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.MixupImage(mixup_epoch=250),
+    transforms.MixupImage(mixup_epoch=250), 
     transforms.RandomDistort(),
-    transforms.RandomExpand(),
-    transforms.RandomCrop(),
-    transforms.Resize(
-        target_size=608, interp='RANDOM'),
+    transforms.RandomExpand(), 
+    transforms.RandomCrop(), 
+    transforms.Resize(target_size=608, interp='RANDOM'), 
     transforms.RandomHorizontalFlip(),
-    transforms.Normalize(),
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.Resize(
-        target_size=608, interp='CUBIC'),
-    transforms.Normalize(),
+    transforms.Resize(target_size=608, interp='CUBIC'), 
+    transforms.Normalize()
 ])
 
 # Define the datasets used for training and validation

+ 7 - 4
tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2.py

@@ -11,12 +11,15 @@ pdx.utils.download_and_decompress(optic_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(),
-    transforms.RandomPaddingCrop(crop_size=512), transforms.Normalize()
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.ResizeByLong(long_size=512), transforms.Padding(target_size=512),
+    transforms.ResizeByLong(long_size=512), 
+    transforms.Padding(target_size=512),
     transforms.Normalize()
 ])
 
@@ -39,7 +42,7 @@ eval_dataset = pdx.datasets.SegDataset(
 # Then open https://0.0.0.0:8001 in a browser
 # 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
 num_classes = len(train_dataset.labels)
-model = pdx.seg.DeepLabv3p(num_classes=num_classes)
+model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='MobileNetV2_x1.0')
 model.train(
     num_epochs=40,
     train_dataset=train_dataset,

+ 53 - 0
tutorials/train/semantic_segmentation/deeplabv3p_mobilenetv2_x0.25.py

@@ -0,0 +1,53 @@
+import os
+# Use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddlex as pdx
+from paddlex.seg import transforms
+
+# Download and extract the optic disc segmentation dataset
+optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
+pdx.utils.download_and_decompress(optic_dataset, path='./')
+
+# Define the transforms for training and validation
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
+])
+
+eval_transforms = transforms.Compose([
+    transforms.ResizeByLong(long_size=512), 
+    transforms.Padding(target_size=512),
+    transforms.Normalize()
+])
+
+# Define the datasets used for training and validation
+train_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/train_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/val_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=eval_transforms)
+
+# Initialize the model and start training
+# Training metrics can be viewed with VisualDL
+# Start VisualDL with: visualdl --logdir output/deeplabv3p_mobilenetv2_x0_25/vdl_log --port 8001
+# Then open https://0.0.0.0:8001 in a browser
+# 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
+num_classes = len(train_dataset.labels)
+model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='MobileNetV2_x0.25')
+model.train(
+    num_epochs=40,
+    train_dataset=train_dataset,
+    train_batch_size=4,
+    eval_dataset=eval_dataset,
+    learning_rate=0.01,
+    save_dir='output/deeplabv3p_mobilenetv2_x0_25',
+    use_vdl=True)

+ 53 - 0
tutorials/train/semantic_segmentation/deeplabv3p_xception65.py

@@ -0,0 +1,53 @@
+import os
+# Use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddlex as pdx
+from paddlex.seg import transforms
+
+# Download and extract the optic disc segmentation dataset
+optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
+pdx.utils.download_and_decompress(optic_dataset, path='./')
+
+# Define the transforms for training and validation
+train_transforms = transforms.Compose([
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
+])
+
+eval_transforms = transforms.Compose([
+    transforms.ResizeByLong(long_size=512), 
+    transforms.Padding(target_size=512),
+    transforms.Normalize()
+])
+
+# Define the datasets used for training and validation
+train_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/train_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/val_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=eval_transforms)
+
+# Initialize the model and start training
+# Training metrics can be viewed with VisualDL
+# Start VisualDL with: visualdl --logdir output/deeplabv3p_xception65/vdl_log --port 8001
+# Then open https://0.0.0.0:8001 in a browser
+# 0.0.0.0 works for local access; for a remote server, use that machine's IP instead
+num_classes = len(train_dataset.labels)
+model = pdx.seg.DeepLabv3p(num_classes=num_classes, backbone='Xception65')
+model.train(
+    num_epochs=40,
+    train_dataset=train_dataset,
+    train_batch_size=4,
+    eval_dataset=eval_dataset,
+    learning_rate=0.01,
+    save_dir='output/deeplabv3p_xception65',
+    use_vdl=True)

+ 6 - 3
tutorials/train/semantic_segmentation/fast_scnn.py

@@ -12,12 +12,15 @@ pdx.utils.download_and_decompress(optic_dataset, path='./')
 # Define the transforms for training and validation
 # API reference: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/seg_transforms.html#composedsegtransforms
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(),
-    transforms.RandomPaddingCrop(crop_size=512), transforms.Normalize()
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.ResizeByLong(long_size=512), transforms.Padding(target_size=512),
+    transforms.ResizeByLong(long_size=512), 
+    transforms.Padding(target_size=512),
     transforms.Normalize()
 ])
 

+ 6 - 3
tutorials/train/semantic_segmentation/hrnet.py

@@ -11,12 +11,15 @@ pdx.utils.download_and_decompress(optic_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(),
-    transforms.RandomPaddingCrop(crop_size=512), transforms.Normalize()
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([
-    transforms.ResizeByLong(long_size=512), transforms.Padding(target_size=512),
+    transforms.ResizeByLong(long_size=512), 
+    transforms.Padding(target_size=512),
     transforms.Normalize()
 ])
 

+ 4 - 2
tutorials/train/semantic_segmentation/unet.py

@@ -11,8 +11,10 @@ pdx.utils.download_and_decompress(optic_dataset, path='./')
 
 # Define the transforms for training and validation
 train_transforms = transforms.Compose([
-    transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(),
-    transforms.RandomPaddingCrop(crop_size=512), transforms.Normalize()
+    transforms.RandomHorizontalFlip(), 
+    transforms.ResizeRangeScaling(),
+    transforms.RandomPaddingCrop(crop_size=512), 
+    transforms.Normalize()
 ])
 
 eval_transforms = transforms.Compose([