
Merge pull request #10 from PaddlePaddle/develop

SunAhong1993 5 years ago
parent commit 17f8b25694
48 changed files with 1033 additions and 75 deletions
  1. 2 0
      .pre-commit-config.yaml
  2. 2 6
      deploy/cpp/include/paddlex/transforms.h
  3. 3 1
      docs/apis/models/classification.md
  4. 61 0
      docs/apis/transforms/cls_transforms.md
  5. 130 0
      docs/apis/transforms/det_transforms.md
  6. 60 0
      docs/apis/transforms/seg_transforms.md
  7. 32 0
      docs/appendix/parameters.md
  8. BIN
      docs/images/._文件(p37) BDSZYF000132754-docs jiangjiajun$ pwd :Users:jiangjiajun:Downloads:PaddleX-develop:docs:vdl1.png
  9. BIN
      docs/images/文件(p37) BDSZYF000132754-docs jiangjiajun$ pwd :Users:jiangjiajun:Downloads:PaddleX-develop:docs:vdl1.png
  10. 28 1
      docs/paddlex_gui/download.md
  11. 148 1
      docs/paddlex_gui/how_to_use.md
  12. BIN
      docs/paddlex_gui/images/QR.jpg
  13. 1 0
      docs/paddlex_gui/images/ReadMe
  14. BIN
      docs/paddlex_gui/images/dataset2.jpg
  15. BIN
      docs/paddlex_gui/images/datasets1.jpg
  16. BIN
      docs/paddlex_gui/images/project1.jpg
  17. BIN
      docs/paddlex_gui/images/project2.jpg
  18. BIN
      docs/paddlex_gui/images/project3.jpg
  19. BIN
      docs/paddlex_gui/images/publish.jpg
  20. BIN
      docs/paddlex_gui/images/visualization1.jpg
  21. BIN
      docs/paddlex_gui/images/visualization2.jpg
  22. BIN
      docs/paddlex_gui/images/visualization3.jpg
  23. BIN
      docs/paddlex_gui/images/visualization4.jpg
  24. BIN
      docs/paddlex_gui/images/visualization5.jpg
  25. 1 1
      docs/paddlex_gui/index.rst
  26. 18 0
      new_tutorials/train/README.md
  27. 47 0
      new_tutorials/train/classification/mobilenetv2.py
  28. 56 0
      new_tutorials/train/classification/resnet50.py
  29. 49 0
      new_tutorials/train/detection/faster_rcnn_r50_fpn.py
  30. 48 0
      new_tutorials/train/detection/mask_rcnn_r50_fpn.py
  31. 48 0
      new_tutorials/train/detection/yolov3_darknet53.py
  32. 51 0
      new_tutorials/train/segmentation/deeplabv3p.py
  33. 47 0
      new_tutorials/train/segmentation/hrnet.py
  34. 47 0
      new_tutorials/train/segmentation/unet.py
  35. 1 1
      paddlex/__init__.py
  36. 32 9
      paddlex/cv/models/classifier.py
  37. 4 2
      paddlex/cv/models/deeplabv3p.py
  38. 16 10
      paddlex/cv/models/faster_rcnn.py
  39. 16 10
      paddlex/cv/models/mask_rcnn.py
  40. 38 8
      paddlex/cv/models/slim/prune.py
  41. 1 1
      paddlex/cv/models/slim/prune_config.py
  42. 15 8
      paddlex/cv/models/yolo_v3.py
  43. 6 1
      paddlex/cv/transforms/cls_transforms.py
  44. 8 3
      paddlex/cv/transforms/det_transforms.py
  45. 7 2
      paddlex/cv/transforms/seg_transforms.py
  46. 1 1
      paddlex/slim.py
  47. 8 8
      paddlex/utils/logging.py
  48. 1 1
      setup.py

+ 2 - 0
.pre-commit-config.yaml

@@ -23,6 +23,7 @@
         files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py)$
         exclude: (?!.*third_party)^.*$
 
+-   repo: local
     hooks:
     -   id: clang-format-with-version-check
         name: clang-format
@@ -31,6 +32,7 @@
         language: system
         files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$
 
+-   repo: local
     hooks:
     -   id: cpplint-cpp-source
         name: cpplint

+ 2 - 6
deploy/cpp/include/paddlex/transforms.h

@@ -83,7 +83,7 @@ class ResizeByShort : public Transform {
     } else {
       max_size_ = -1;
     }
-  };
+  }
   virtual bool Run(cv::Mat* im, ImageBlob* data);
 
  private:
@@ -96,7 +96,7 @@ class ResizeByLong : public Transform {
  public:
   virtual void Init(const YAML::Node& item) {
     long_size_ = item["long_size"].as<int>();
-  };
+  }
   virtual bool Run(cv::Mat* im, ImageBlob* data);
 
  private:
@@ -167,9 +167,6 @@ class Padding : public Transform {
         height_ = item["target_size"].as<std::vector<int>>()[1];
       }
     }
-    if (item["im_padding_value"].IsDefined()) {
-      value_ = item["im_padding_value"].as<std::vector<float>>();
-    }
   }
   virtual bool Run(cv::Mat* im, ImageBlob* data);
 
@@ -177,7 +174,6 @@ class Padding : public Transform {
   int coarsest_stride_ = -1;
   int width_ = 0;
   int height_ = 0;
-  std::vector<float> value_;
 };
 
 class Transforms {

+ 3 - 1
docs/apis/models/classification.md

@@ -15,7 +15,7 @@ paddlex.cls.ResNet50(num_classes=1000)
 ### train (training API)
 
 ```python
-train(self, num_epochs, train_dataset, train_batch_size=64, eval_dataset=None, save_interval_epochs=1, log_interval_steps=2, save_dir='output', pretrain_weights='IMAGENET', optimizer=None, learning_rate=0.025, lr_decay_epochs=[30, 60, 90], lr_decay_gamma=0.1, use_vdl=False, sensitivities_file=None, eval_metric_loss=0.05, early_stop=False, early_stop_patience=5, resume_checkpoint=None)
+train(self, num_epochs, train_dataset, train_batch_size=64, eval_dataset=None, save_interval_epochs=1, log_interval_steps=2, save_dir='output', pretrain_weights='IMAGENET', optimizer=None, learning_rate=0.025, warmup_steps=0, warmup_start_lr=0.0, lr_decay_epochs=[30, 60, 90], lr_decay_gamma=0.1, use_vdl=False, sensitivities_file=None, eval_metric_loss=0.05, early_stop=False, early_stop_patience=5, resume_checkpoint=None)
 ```
 >
 > **Parameters**
@@ -30,6 +30,8 @@ train(self, num_epochs, train_dataset, train_batch_size=64, eval_dataset=None, s
 > > - **pretrain_weights** (str): If a path is given, the pretrained model under that path is loaded; if the string 'IMAGENET', weights pretrained on ImageNet are downloaded automatically; if None, no pretrained model is used. Defaults to 'IMAGENET'.
 > > - **optimizer** (paddle.fluid.optimizer): Optimizer. When None, the default optimizer is used: the fluid.layers.piecewise_decay decay strategy with the fluid.optimizer.Momentum method.
 > > - **learning_rate** (float): Initial learning rate of the default optimizer. Defaults to 0.025.
+> > - **warmup_steps** (int): Number of warmup steps of the default optimizer; within this many steps the learning rate grows linearly from warmup_start_lr to the configured learning_rate. Defaults to 0.
+> > - **warmup_start_lr** (float): Starting learning rate of the default optimizer's warmup. Defaults to 0.0.
 > > - **lr_decay_epochs** (list): Epochs at which the default optimizer's learning rate decays. Defaults to [30, 60, 90].
 > > - **lr_decay_gamma** (float): Decay rate of the default optimizer's learning rate. Defaults to 0.1.
 > > - **use_vdl** (bool): Whether to use VisualDL for visualization. Defaults to False.
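
A minimal sketch of a train() call exercising the two new warmup parameters; the dataset layout and all numbers below are illustrative assumptions borrowed from the tutorial scripts added in this PR:

```python
import paddlex as pdx
from paddlex.cls import transforms

# Assumed dataset layout, as in new_tutorials/train/classification/resnet50.py
train_transforms = transforms.ComposedClsTransforms(mode='train', crop_size=[224, 224])
train_dataset = pdx.datasets.ImageNet(
    data_dir='vegetables_cls',
    file_list='vegetables_cls/train_list.txt',
    label_list='vegetables_cls/labels.txt',
    transforms=train_transforms)

model = pdx.cls.ResNet50(num_classes=len(train_dataset.labels))
# Warm up linearly from 0.0 to 0.025 over the first 100 steps (illustrative),
# then decay the learning rate by 0.1x at epochs 30, 60 and 90.
model.train(
    num_epochs=120,
    train_dataset=train_dataset,
    learning_rate=0.025,
    warmup_steps=100,
    warmup_start_lr=0.0,
    lr_decay_epochs=[30, 60, 90],
    lr_decay_gamma=0.1,
    save_dir='output/resnet50')
```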

+ 61 - 0
docs/apis/transforms/cls_transforms.md

@@ -122,3 +122,64 @@ paddlex.cls.transforms.RandomDistort(brightness_range=0.9, brightness_prob=0.5,
 * **saturation_prob** (float): Probability of randomly adjusting saturation. Defaults to 0.5.
 * **hue_range** (int): Range of the hue factor. Defaults to 18.
 * **hue_prob** (float): Probability of randomly adjusting hue. Defaults to 0.5.
+
+## ComposedClsTransforms class
+```python
+paddlex.cls.transforms.ComposedClsTransforms(mode, crop_size=[224, 224], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+```
+A pre-assembled data processing pipeline for classification models. Developers can use ComposedClsTransforms directly instead of composing transforms by hand. The class already includes the [RandomCrop](#RandomCrop) and [RandomHorizontalFlip](#RandomHorizontalFlip) augmentations; additional augmentations can still be added through the [add_augmenters interface](#add_augmenters).  
+ComposedClsTransforms performs the following steps:
+> Training:
+> > 1. Randomly crop a sub-image from the input and resize it to crop_size
+> > 2. Randomly flip the output of step 1 horizontally with probability 0.5
+> > 3. Normalize the image
+> Evaluation/prediction:
+> > 1. Resize the image proportionally so that its shorter side is crop_size[0] * 1.14
+> > 2. Crop a crop_size image from the center
+> > 3. Normalize the image
+
+### Parameters
+* **mode** (str): Phase the transforms run in; one of 'train', 'eval' or 'test'
+* **crop_size** (int|list): Size of the image fed to the model; defaults to [224, 224]. Independent of the original image size: the steps above resize the original image to this size before it reaches the model.
+* **mean** (list): Image mean. Defaults to [0.485, 0.456, 0.406].
+* **std** (list): Image standard deviation. Defaults to [0.229, 0.224, 0.225].
+
+### Adding augmentations
+```python
+ComposedClsTransforms.add_augmenters(augmenters)
+```
+> **Parameters**
+> * **augmenters** (list): List of augmentations
+
+#### Example
+```python
+import paddlex as pdx
+from paddlex.cls import transforms
+train_transforms = transforms.ComposedClsTransforms(mode='train', crop_size=[320, 320])
+eval_transforms = transforms.ComposedClsTransforms(mode='eval', crop_size=[320, 320])
+
+# add augmentations
+import imgaug.augmenters as iaa
+train_transforms.add_augmenters([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0))
+])
+```
+The code above is equivalent to:
+```python
+import paddlex as pdx
+from paddlex.cls import transforms
+train_transforms = transforms.Compose([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0)),
+    # the two transforms above were added via add_augmenters
+    transforms.RandomCrop(crop_size=320),
+    transforms.RandomHorizontalFlip(prob=0.5),
+    transforms.Normalize()
+])
+eval_transforms = transforms.Compose([
+    transforms.ResizeByShort(short_size=int(320*1.14)),
+    transforms.CenterCrop(crop_size=320),
+    transforms.Normalize()
+])
+```

+ 130 - 0
docs/apis/transforms/det_transforms.md

@@ -167,3 +167,133 @@ paddlex.det.transforms.RandomCrop(aspect_ratio=[.5, 2.], thresholds=[.0, .1, .3,
 * **num_attempts** (int): Number of attempts to find a valid crop region before giving up. Defaults to 50.
 * **allow_no_crop** (bool): Whether to allow skipping the crop entirely. Defaults to True.
 * **cover_all_box** (bool): Whether all ground-truth boxes must lie inside the crop region. Defaults to False.
+
+## ComposedRCNNTransforms class
+```python
+paddlex.det.transforms.ComposedRCNNTransforms(mode, min_max_size=[800, 1333], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+```
+A pre-assembled data processing pipeline for the FasterRCNN object detection and MaskRCNN instance segmentation models. Developers can use ComposedRCNNTransforms directly instead of composing transforms by hand. The class already includes the [RandomHorizontalFlip](#RandomHorizontalFlip) augmentation; additional augmentations can still be added through the [add_augmenters interface](#add_augmenters).  
+ComposedRCNNTransforms performs the following steps:
+> Training:
+> > 1. Randomly flip the image horizontally with probability 0.5
+> > 2. Normalize the image
+> > 3. Resize the image with [ResizeByShort](#ResizeByShort) according to the min_max_size parameter
+> > 4. Pad the image height and width to multiples of 32 with [Padding](#Padding)
+> Evaluation/prediction:
+> > 1. Normalize the image
+> > 2. Resize the image with [ResizeByShort](#ResizeByShort) according to the min_max_size parameter
+> > 3. Pad the image height and width to multiples of 32 with [Padding](#Padding)
+
+### Parameters
+* **mode** (str): Phase the transforms run in; one of 'train', 'eval' or 'test'
+* **min_max_size** (list): Shortest-side and longest-side limits of the image fed to the model; see [ResizeByShort](#ResizeByShort). Independent of the original image size: the steps above resize the original image accordingly before it reaches the model. Defaults to [800, 1333].
+* **mean** (list): Image mean. Defaults to [0.485, 0.456, 0.406].
+* **std** (list): Image standard deviation. Defaults to [0.229, 0.224, 0.225].
+
+### Adding augmentations
+```python
+ComposedRCNNTransforms.add_augmenters(augmenters)
+```
+> **Parameters**
+> * **augmenters** (list): List of augmentations
+
+#### Example
+```python
+import paddlex as pdx
+from paddlex.det import transforms
+train_transforms = transforms.ComposedRCNNTransforms(mode='train', min_max_size=[800, 1333])
+eval_transforms = transforms.ComposedRCNNTransforms(mode='eval', min_max_size=[800, 1333])
+
+# add augmentations
+import imgaug.augmenters as iaa
+train_transforms.add_augmenters([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0))
+])
+```
+The code above is equivalent to:
+```python
+import paddlex as pdx
+from paddlex.det import transforms
+train_transforms = transforms.Compose([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0)),
+    # the two transforms above were added via add_augmenters
+    transforms.RandomHorizontalFlip(prob=0.5),
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32)
+])
+eval_transforms = transforms.Compose([
+    transforms.Normalize(),
+    transforms.ResizeByShort(short_size=800, max_size=1333),
+    transforms.Padding(coarsest_stride=32)
+])
+```
+
+
+## ComposedYOLOv3Transforms class
+```python
+paddlex.det.transforms.ComposedYOLOv3Transforms(mode, shape=[608, 608], mixup_epoch=250, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+```
+A pre-assembled data processing pipeline for the YOLOv3 object detection model. Developers can use ComposedYOLOv3Transforms directly instead of composing transforms by hand. The class already includes five augmentations: [MixupImage](#MixupImage), [RandomDistort](#RandomDistort), [RandomExpand](#RandomExpand), [RandomCrop](#RandomCrop) and [RandomHorizontalFlip](#RandomHorizontalFlip); additional augmentations can still be added through the [add_augmenters interface](#add_augmenters).  
+ComposedYOLOv3Transforms performs the following steps:
+> Training:
+> > 1. Apply the MixupImage strategy during the first mixup_epoch epochs
+> > 2. Randomly distort the image: brightness, contrast, saturation and hue
+> > 3. Randomly expand the image
+> > 4. Randomly crop the image
+> > 5. Resize the output of step 4 to the size given by the shape parameter
+> > 6. Randomly flip the image horizontally with probability 0.5
+> > 7. Normalize the image
+> Evaluation/prediction:
+> > 1. Resize the image to the size given by the shape parameter
+> > 2. Normalize the image
+
+### Parameters
+* **mode** (str): Phase the transforms run in; one of 'train', 'eval' or 'test'
+* **shape** (list): Size of the image fed to the model; independent of the original image size (the steps above resize the original image to this size). Defaults to [608, 608].
+* **mixup_epoch** (int): The mixup strategy is applied during the first mixup_epoch epochs of training; -1 disables mixup. Defaults to 250.
+* **mean** (list): Image mean. Defaults to [0.485, 0.456, 0.406].
+* **std** (list): Image standard deviation. Defaults to [0.229, 0.224, 0.225].
+
+### Adding augmentations
+```python
+ComposedYOLOv3Transforms.add_augmenters(augmenters)
+```
+> **Parameters**
+> * **augmenters** (list): List of augmentations
+
+#### Example
+```python
+import paddlex as pdx
+from paddlex.det import transforms
+train_transforms = transforms.ComposedYOLOv3Transforms(mode='train', shape=[480, 480])
+eval_transforms = transforms.ComposedYOLOv3Transforms(mode='eval', shape=[480, 480])
+
+# add augmentations
+import imgaug.augmenters as iaa
+train_transforms.add_augmenters([
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0))
+])
+```
+The code above is equivalent to:
+```python
+import paddlex as pdx
+from paddlex.det import transforms
+train_transforms = transforms.Compose([
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0)),
+    # the transform above was added via add_augmenters
+    transforms.MixupImage(mixup_epoch=250),
+    transforms.RandomDistort(),
+    transforms.RandomExpand(),
+    transforms.RandomCrop(),
+    transforms.Resize(target_size=480, interp='RANDOM'),
+    transforms.RandomHorizontalFlip(prob=0.5),
+    transforms.Normalize()
+])
+eval_transforms = transforms.Compose([
+    transforms.Resize(target_size=480, interp='CUBIC'),
+    transforms.Normalize()
+])
+```

+ 60 - 0
docs/apis/transforms/seg_transforms.md

@@ -166,3 +166,63 @@ paddlex.seg.transforms.RandomDistort(brightness_range=0.5, brightness_prob=0.5,
 * **saturation_prob** (float): Probability of randomly adjusting saturation. Defaults to 0.5.
 * **hue_range** (int): Range of the hue factor. Defaults to 18.
 * **hue_prob** (float): Probability of randomly adjusting hue. Defaults to 0.5.
+
+## ComposedSegTransforms class
+```python
+paddlex.seg.transforms.ComposedSegTransforms(mode, train_crop_size=[769, 769], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+```
+A pre-assembled data processing pipeline for the DeepLab and UNet semantic segmentation models. Developers can use ComposedSegTransforms directly instead of composing transforms by hand. The class already includes three augmentations: [RandomHorizontalFlip](#RandomHorizontalFlip), [ResizeStepScaling](#ResizeStepScaling) and [RandomPaddingCrop](#RandomPaddingCrop); additional augmentations can still be added through the [add_augmenters interface](#add_augmenters).  
+ComposedSegTransforms performs the following steps:
+> Training:
+> > 1. Randomly flip the image horizontally with probability 0.5
+> > 2. Randomly resize the original image by varying scale factors
+> > 3. Randomly crop a train_crop_size sub-image from it; if the crop is smaller than train_crop_size, the image is padded to that size
+> > 4. Normalize the image
+> Prediction:
+> > 1. Normalize the image
+
+
+### Parameters
+* **mode** (str): Phase the transforms run in; one of 'train', 'eval' or 'test'
+* **train_crop_size** (list): Size of the image fed to the model after the random crop and resize during training (not needed for evaluation or prediction, where the original image size is used automatically). Independent of the original image size: the steps above resize the original image accordingly. Defaults to [769, 769].
+* **mean** (list): Image mean. Defaults to [0.485, 0.456, 0.406].
+* **std** (list): Image standard deviation. Defaults to [0.229, 0.224, 0.225].
+
+### Adding augmentations
+```python
+ComposedSegTransforms.add_augmenters(augmenters)
+```
+> **Parameters**
+> * **augmenters** (list): List of augmentations
+
+#### Example
+```python
+import paddlex as pdx
+from paddlex.seg import transforms
+train_transforms = transforms.ComposedSegTransforms(mode='train', train_crop_size=[512, 512])
+eval_transforms = transforms.ComposedSegTransforms(mode='eval')
+
+# add augmentations
+import imgaug.augmenters as iaa
+train_transforms.add_augmenters([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0))
+])
+```
+The code above is equivalent to:
+```python
+import paddlex as pdx
+from paddlex.seg import transforms
+train_transforms = transforms.Compose([
+    transforms.RandomDistort(),
+    iaa.blur.GaussianBlur(sigma=(0.0, 3.0)),
+    # the two transforms above were added via add_augmenters
+    transforms.RandomHorizontalFlip(prob=0.5),
+    transforms.ResizeStepScaling(),
+    transforms.RandomPaddingCrop(crop_size=[512, 512]),
+    transforms.Normalize()
+])
+eval_transforms = transforms.Compose([
+    transforms.Normalize()
+])
+```

+ 32 - 0
docs/appendix/parameters.md

@@ -23,3 +23,35 @@ Batch Size is the number of samples the model processes at once during training; if
 - [Instance segmentation MaskRCNN-train](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/instance_segmentation.html#train)
 - [Semantic segmentation DeepLabv3p-train](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#train)
 - [Semantic segmentation UNet](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#id2)
+
+## Notes on lr_decay_epochs, warmup_steps and related parameters
+
+Parameters such as lr_decay_epochs, warmup_steps and warmup_start_lr appear frequently when training models with PaddleX and other deep learning frameworks. This section explains what they do.  
+
+All of these parameters control how the learning rate changes during training. For example, with learning_rate set to 0.1, training would normally proceed with a constant learning rate of 0.1 throughout; to get better results, however, we usually do not want the learning rate to stay fixed.
+
+### warmup_steps and warmup_start_lr
+
+Models are usually trained from pretrained weights, e.g. a detection model initializes its backbone with weights pretrained on ImageNet. Because your own data can differ substantially from ImageNet, overly large gradients at the start of training can destabilize it, so it helps to begin with a small learning rate and grow it gradually to the configured value. That is exactly what `warmup_steps` and `warmup_start_lr` do: training starts at `warmup_start_lr`, and the learning rate grows linearly to the configured learning rate over `warmup_steps` steps.
+
+### lr_decay_epochs and lr_decay_gamma
+
+`lr_decay_epochs` decays the learning rate step-wise in the later stages of training. It is usually a list, e.g. [6, 8, 10], meaning the learning rate decays once at epoch 6, again at epoch 8, and once more at epoch 10. Each decay multiplies the current learning rate by lr_decay_gamma.
+
+### Notice
+
+PaddleX requires warmup to finish before the first learning rate decay, so the following must hold:
+```
+warmup_steps <= lr_decay_epochs[0] * num_steps_each_epoch
+```
+where `num_steps_each_epoch = num_samples_in_train_dataset // train_batch_size`.  
+
+>  Therefore, if PaddleX reports `warmup_steps should be less than xxx` during training, adjust `lr_decay_epochs` or `warmup_steps` according to the formula above so that the two parameters satisfy this condition; a small sketch of the resulting schedule follows the links below.
+
+> - Image classification [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/classification.html#train)
+> - FasterRCNN [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#fasterrcnn)
+> - YOLOv3 [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#yolov3)
+> - MaskRCNN [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/instance_segmentation.html#maskrcnn)
+> - DeepLab [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#deeplabv3p)
+> - UNet [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#unet)
+> - HRNet [train API docs](https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#hrnet)
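
For a concrete picture of how warmup and piecewise decay combine, here is a minimal Python sketch of the schedule these parameters describe; all values are hypothetical:

```python
# Hypothetical values for illustration only.
num_samples_in_train_dataset = 1000
train_batch_size = 32
learning_rate = 0.025
warmup_steps = 50
warmup_start_lr = 0.0
lr_decay_epochs = [30, 60, 90]
lr_decay_gamma = 0.1

num_steps_each_epoch = num_samples_in_train_dataset // train_batch_size

# The constraint PaddleX checks: warmup must end before the first decay.
assert warmup_steps <= lr_decay_epochs[0] * num_steps_each_epoch

def lr_at_step(step):
    """Learning rate at a global step under linear warmup + piecewise decay."""
    if step < warmup_steps:  # linear warmup phase
        return warmup_start_lr + (learning_rate - warmup_start_lr) * step / warmup_steps
    epoch = step // num_steps_each_epoch
    decays = sum(1 for e in lr_decay_epochs if epoch >= e)
    return learning_rate * (lr_decay_gamma ** decays)
```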

BIN
docs/images/._文件(p37) BDSZYF000132754-docs jiangjiajun$ pwd :Users:jiangjiajun:Downloads:PaddleX-develop:docs:vdl1.png


BIN
docs/images/文件(p37) BDSZYF000132754-docs jiangjiajun$ pwd :Users:jiangjiajun:Downloads:PaddleX-develop:docs:vdl1.png


+ 28 - 1
docs/paddlex_gui/download.md

@@ -1 +1,28 @@
-# PaddleX GUI download and installation
+## <a name="2">PaddleX GUI Installation</a>
+
+ PaddleX GUI is the core module for raising project development efficiency; with it, developers can complete the full deep learning model development workflow quickly. We sincerely invite you to download and try the PaddleX GUI visual front end from the [official site](https://www.paddlepaddle.org.cn/paddle/paddleX), and to share your valuable feedback or contribute to the open-source project.
+
+
+
+#### <a name="1">Recommended Environment</a>
+
+* **Operating system**:
+  * Windows 7/8/10 (Windows 10 recommended);
+  * Mac OS 10.13+;
+  * Ubuntu 18.04+;
+
+***Note: the processor must be x86_64 and support MKL.***
+
+* **Training hardware**:  
+  * **GPU** (Windows and Linux only):  
+    An NVIDIA GPU with CUDA support is recommended, e.g. a card at least as capable as a GTX 1070;  
+    Windows x86_64 driver version >= 411.31;  
+    Linux x86_64 driver version >= 410.48;  
+    8 GB or more of GPU memory;  
+  * **CPU**:  
+    PaddleX currently supports training on your local CPU, but a GPU is strongly recommended for a better development experience.
+  * **Memory**: 8 GB or more recommended  
+  * **Disk space**: 1 TB or more of free SSD space recommended (not required)  
+
+***Note: on Windows and Mac OS, PaddleX supports single-GPU training only. NCCL is not yet supported on Windows.***
+

+ 148 - 1
docs/paddlex_gui/how_to_use.md

@@ -1 +1,148 @@
-# How to train models with the PaddleX GUI
+# PaddleX GUI User Guide
+
+PaddleX is PaddlePaddle's end-to-end development toolkit, bundling the core framework, model zoo, tools and components needed across the deep learning development workflow. Easy to use and easy to integrate, it is the best companion for getting started with deep learning quickly and for raising project development efficiency.
+
+PaddleX GUI is a graphical development client built on PaddleX. It lets developers complete the full deep learning model development workflow through form-style input, which greatly improves development efficiency. The PaddlePaddle team looks forward to developers building products on top of PaddleX that meet their own real industrial needs.
+
+We sincerely invite you to download and try PaddleX GUI from the [official site](https://www.paddlepaddle.org.cn/paddlex), and to share your valuable feedback or contribute to the open-source project.
+
+
+
+## Contents
+
+* **Product features**
+* **The PaddleX GUI visual front end**
+* **FAQ**
+
+
+
+## Product Features
+
+\-  **End-to-end workflow**
+
+Connects the entire deep learning development workflow behind one visual interface, removing the need to learn each stage's APIs or write repetitive code, which greatly improves development efficiency.
+
+\-  **Easy to use and integrate**
+
+Offers the most complete and flexible Python API development mode, fully open source and easy to integrate and extend. The form-style graphical interface also lets non-specialists run business PoCs quickly.
+
+\-  **Built on industrial practice**
+
+Incorporates PaddlePaddle's industrial deployment experience, curates high-quality vision model solutions, and provides hands-on, real-world case tutorials to help you land production requirements.
+
+\-  **Tutorials and support**
+
+From dataset preparation to deployment, documentation and technical support cover the whole development workflow. Developers can reach the PaddlePaddle team and industry partners through QQ groups, WeChat groups and the GitHub community.
+
+
+
+## The PaddleX GUI Visual Front End
+
+**Step 1: prepare the data**
+
+Before training, annotate your data in the format required by the task type. PaddleX currently supports four task types: image classification, object detection, semantic segmentation and instance segmentation. See [data annotation formats](https://paddlex.readthedocs.io/zh_CN/latest/appendix/datasets.html) for how each task's data is processed.
+
+ 
+
+**Step 2: import your dataset**
+
+① After annotation, rename the data and annotation files as the client prompts for the given task, and save them to the correct folders.
+
+② Create a new dataset in the client, choose the task type that matches the dataset, select the dataset's path, and import it.
+
+![](images/datasets1.jpg)
+
+③ Once the dataset is selected, the client automatically checks that the data and annotation files are well-formed. After validation succeeds, you can split the dataset into training, validation and test sets in whatever ratio you need.
+
+④ You can preview the annotated dataset in the Data Analysis module; double-click an image to zoom in.
+
+![](images/dataset2.jpg)
+
+**Step 3: create a project**
+
+① After the data is imported, click New Project to create one.
+
+② Choose the project's task type according to your needs; note that datasets also carry a task type attribute, and the two must match.
+
+![](images/project3.jpg)
+
+
+
+**Step 4: develop the project**
+
+① **Data selection**: once the project is created, select a dataset that has been loaded into the client and validated, then click Next to open the parameter configuration page.
+
+![](images/project1.jpg)
+
+② **Parameter configuration**: split into **model parameters**, **training parameters** and **optimization strategy**. Choose the model architecture, the backbone, and the corresponding training parameters and optimization strategy to get the best results for your task.
+
+![](images/project2.jpg)
+
+When configuration is done, click Start Training; the model trains and is evaluated as it goes.
+
+③ **Training visualization**: during training you can use VisualDL to inspect the evolution of training parameters, detailed logs, and the current best metrics on the training and validation sets. Training can be stopped at any time by clicking "Stop Training".
+
+![](images/visualization1.jpg)
+
+After training finishes, you can open Model Pruning Analysis or go straight to Model Evaluation.
+
+![](images/visualization2.jpg)
+
+④ **Model pruning**: to shrink the model, reduce its compute cost and speed up on-device inference, you can apply PaddleX's pruning strategy. Pruning analyzes the sensitivity of each convolutional layer, prunes each parameter by a ratio based on its impact on model quality, and then fine-tunes the network to produce the final pruned model.
+
+![](images/visualization3.jpg)
+
+⑤ **Model evaluation**: the evaluation page shows how the trained model performs, with metrics such as the confusion matrix, precision and recall.
+
+![](images/visualization4.jpg)
+
+You can also test the trained model on the test set held out when the dataset was split, or on one or more images imported from a local folder. Based on the results, you can save the trained model as a pretrained model and proceed to the model publishing page, or go back to earlier steps, adjust the parameter configuration, and retrain.
+
+![](images/visualization5.jpg)
+
+
+
+**Step 5: publish the model**
+
+Once you are satisfied with the model, publish it in whatever form your production environment requires.
+
+![](images/publish.jpg)
+
+
+
+## FAQ
+
+1. **Why is training so slow?**
+
+   PaddleX computes entirely on your local hardware, and deep learning tasks genuinely demand a lot of compute. To let you try PaddleX quickly we support CPUs as well, but we strongly recommend a GPU for faster training and a better development experience.
+
+   
+
+2. **Can I deploy PaddleX on a server or a cloud platform?**
+
+   PaddleX GUI is a client designed for local single-machine installation and cannot be deployed on a server directly. You can use the PaddleX API instead, or deploy with the core PaddlePaddle framework. If you would like to use public compute, we strongly recommend trying [EasyDL](https://ai.baidu.com/easydl/) or [AI Studio](https://aistudio.baidu.com/aistudio/index) from the PaddlePaddle product family.
+
+   
+
+3. **Does PaddleX support data annotated with EasyData?**
+
+   Yes, PaddleX reads EasyData-annotated data without problems. However, the current version of PaddleX GUI cannot import the EasyData format directly; please follow the docs to [convert the dataset](https://paddlex.readthedocs.io/zh_CN/latest/appendix/how_to_convert_dataset.html) before importing it into PaddleX GUI for further development.
+   We are also actively working on letting PaddleX GUI import the EasyData format directly.
+   
+   
+
+4. **Why does model pruning analysis take so long?**
+
+   Pruning analysis measures the sensitivity of each convolutional layer and prunes each parameter by a ratio based on its impact on model quality. The process repeats until the FLOPs target is met, and finishes with fine-tuning to produce the final pruned model, which is why it takes a while. For the underlying theory, see the docs on [pruning principles](https://paddlepaddle.github.io/PaddleSlim/algo/algo.html#2-%E5%8D%B7%E7%A7%AF%E6%A0%B8%E5%89%AA%E8%A3%81%E5%8E%9F%E7%90%86)
+
+   
+
+5. **How do I call the backend code?**
+
+   The PaddleX team maintains API reference documentation for you to learn from and use; see the [PaddleX API documentation](https://paddlex.readthedocs.io/zh_CN/latest/apis/index.html)
+
+
+
+**If you have more questions or suggestions, feel free to open an issue, or join the official PaddleX QQ group (1045148026) to give us your feedback directly.**
+
+![](images/QR.jpg)

BIN
docs/paddlex_gui/images/QR.jpg


+ 1 - 0
docs/paddlex_gui/images/ReadMe

@@ -0,0 +1 @@
+PaddleX GUI Screen Shot

BIN
docs/paddlex_gui/images/dataset2.jpg


BIN
docs/paddlex_gui/images/datasets1.jpg


BIN
docs/paddlex_gui/images/project1.jpg


BIN
docs/paddlex_gui/images/project2.jpg


BIN
docs/paddlex_gui/images/project3.jpg


BIN
docs/paddlex_gui/images/publish.jpg


BIN
docs/paddlex_gui/images/visualization1.jpg


BIN
docs/paddlex_gui/images/visualization2.jpg


BIN
docs/paddlex_gui/images/visualization3.jpg


BIN
docs/paddlex_gui/images/visualization4.jpg


BIN
docs/paddlex_gui/images/visualization5.jpg


+ 1 - 1
docs/paddlex_gui/index.rst

@@ -21,7 +21,7 @@ PaddleX GUI is a visual model training suite built on PaddleX that can
    how_to_use.md
    xx.md
 
-* PaddleX version: v0.1.7
+* PaddleX GUI version: v1.0
 * Official site: http://www.paddlepaddle.org.cn/paddle/paddlex  
 * Project GitHub: https://github.com/PaddlePaddle/PaddleX/tree/develop  
 * Official QQ user group: 1045148026  

+ 18 - 0
new_tutorials/train/README.md

@@ -0,0 +1,18 @@
+# Tutorials: training models
+
+This directory collects example code for training models with PaddleX. Every script downloads its sample data automatically and trains on a single GPU card.
+
+|Code | Task | Data |
+|------|--------|---------|
+|classification/mobilenetv2.py | Image classification with MobileNetV2 | Vegetable classification |
+|classification/resnet50.py | Image classification with ResNet50 | Vegetable classification |
+|detection/faster_rcnn_r50_fpn.py | Object detection with FasterRCNN | Insect detection |
+|detection/yolov3_darknet53.py | Object detection with YOLOv3 | Insect detection |
+|detection/mask_rcnn_r50_fpn.py | Instance segmentation with MaskRCNN | Xiaoduxiong sorting |
+|segmentation/deeplabv3p.py | Semantic segmentation with DeepLabV3 | Optic disc segmentation |
+|segmentation/hrnet.py | Semantic segmentation with HRNet | Optic disc segmentation |
+|segmentation/unet.py | Semantic segmentation with UNet | Optic disc segmentation |
+
+## Start training
+After installing PaddleX, start training with the following command:
+```
+python classification/mobilenetv2.py
+```

+ 47 - 0
new_tutorials/train/classification/mobilenetv2.py

@@ -0,0 +1,47 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+from paddlex.cls import transforms
+import paddlex as pdx
+
+# download and extract the vegetable classification dataset
+veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
+pdx.utils.download_and_decompress(veg_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/cls_transforms.html#composedclstransforms
+train_transforms = transforms.ComposedClsTransforms(mode='train', crop_size=[224, 224])
+eval_transforms = transforms.ComposedClsTransforms(mode='eval', crop_size=[224, 224])
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/classification.html#imagenet
+train_dataset = pdx.datasets.ImageNet(
+    data_dir='vegetables_cls',
+    file_list='vegetables_cls/train_list.txt',
+    label_list='vegetables_cls/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.ImageNet(
+    data_dir='vegetables_cls',
+    file_list='vegetables_cls/val_list.txt',
+    label_list='vegetables_cls/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/mobilenetv2/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/classification.html
+model = pdx.cls.MobileNetV2(num_classes=len(train_dataset.labels))
+model.train(
+    num_epochs=10,
+    train_dataset=train_dataset,
+    train_batch_size=32,
+    eval_dataset=eval_dataset,
+    lr_decay_epochs=[4, 6, 8],
+    learning_rate=0.025,
+    save_dir='output/mobilenetv2',
+    use_vdl=True)

+ 56 - 0
new_tutorials/train/classification/resnet50.py

@@ -0,0 +1,56 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddle.fluid as fluid
+from paddlex.cls import transforms
+import paddlex as pdx
+
+# download and extract the vegetable classification dataset
+veg_dataset = 'https://bj.bcebos.com/paddlex/datasets/vegetables_cls.tar.gz'
+pdx.utils.download_and_decompress(veg_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/cls_transforms.html#composedclstransforms
+train_transforms = transforms.ComposedClsTransforms(mode='train', crop_size=[224, 224])
+eval_transforms = transforms.ComposedClsTransforms(mode='eval', crop_size=[224, 224])
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/classification.html#imagenet
+train_dataset = pdx.datasets.ImageNet(
+    data_dir='vegetables_cls',
+    file_list='vegetables_cls/train_list.txt',
+    label_list='vegetables_cls/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.ImageNet(
+    data_dir='vegetables_cls',
+    file_list='vegetables_cls/val_list.txt',
+    label_list='vegetables_cls/labels.txt',
+    transforms=eval_transforms)
+
+# PaddleX supports building a custom optimizer
+step_each_epoch = train_dataset.num_samples // 32
+learning_rate = fluid.layers.cosine_decay(
+    learning_rate=0.025, step_each_epoch=step_each_epoch, epochs=10)
+optimizer = fluid.optimizer.Momentum(
+    learning_rate=learning_rate,
+    momentum=0.9,
+    regularization=fluid.regularizer.L2Decay(4e-5))
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/resnet50/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/classification.html#resnet50
+model = pdx.cls.ResNet50(num_classes=len(train_dataset.labels))
+model.train(
+    num_epochs=10,
+    train_dataset=train_dataset,
+    train_batch_size=32,
+    eval_dataset=eval_dataset,
+    optimizer=optimizer,
+    save_dir='output/resnet50',
+    use_vdl=True)

+ 49 - 0
new_tutorials/train/detection/faster_rcnn_r50_fpn.py

@@ -0,0 +1,49 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+from paddlex.det import transforms
+import paddlex as pdx
+
+# download and extract the insect detection dataset
+insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
+pdx.utils.download_and_decompress(insect_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#composedrcnntransforms
+train_transforms = transforms.ComposedRCNNTransforms(mode='train', min_max_size=[800, 1333])
+eval_transforms = transforms.ComposedRCNNTransforms(mode='eval', min_max_size=[800, 1333])
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/detection.html#vocdetection
+train_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/train_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/val_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/faster_rcnn_r50_fpn/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+# num_classes must include the background class, i.e. number of object classes + 1
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#fasterrcnn
+num_classes = len(train_dataset.labels) + 1
+model = pdx.det.FasterRCNN(num_classes=num_classes)
+model.train(
+    num_epochs=12,
+    train_dataset=train_dataset,
+    train_batch_size=2,
+    eval_dataset=eval_dataset,
+    learning_rate=0.0025,
+    lr_decay_epochs=[8, 11],
+    save_dir='output/faster_rcnn_r50_fpn',
+    use_vdl=True)

+ 48 - 0
new_tutorials/train/detection/mask_rcnn_r50_fpn.py

@@ -0,0 +1,48 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+from paddlex.det import transforms
+import paddlex as pdx
+
+# download and extract the Xiaoduxiong sorting dataset
+xiaoduxiong_dataset = 'https://bj.bcebos.com/paddlex/datasets/xiaoduxiong_ins_det.tar.gz'
+pdx.utils.download_and_decompress(xiaoduxiong_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#composedrcnntransforms
+train_transforms = transforms.ComposedRCNNTransforms(mode='train', min_max_size=[800, 1333])
+eval_transforms = transforms.ComposedRCNNTransforms(mode='eval', min_max_size=[800, 1333])
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/detection.html#cocodetection
+train_dataset = pdx.datasets.CocoDetection(
+    data_dir='xiaoduxiong_ins_det/JPEGImages',
+    ann_file='xiaoduxiong_ins_det/train.json',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.CocoDetection(
+    data_dir='xiaoduxiong_ins_det/JPEGImages',
+    ann_file='xiaoduxiong_ins_det/val.json',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/mask_rcnn_r50_fpn/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+# num_classes must include the background class, i.e. number of object classes + 1
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/instance_segmentation.html#maskrcnn
+num_classes = len(train_dataset.labels) + 1
+model = pdx.det.MaskRCNN(num_classes=num_classes)
+model.train(
+    num_epochs=12,
+    train_dataset=train_dataset,
+    train_batch_size=1,
+    eval_dataset=eval_dataset,
+    learning_rate=0.00125,
+    warmup_steps=10,
+    lr_decay_epochs=[8, 11],
+    save_dir='output/mask_rcnn_r50_fpn',
+    use_vdl=True)

+ 48 - 0
new_tutorials/train/detection/yolov3_darknet53.py

@@ -0,0 +1,48 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+from paddlex.det import transforms
+import paddlex as pdx
+
+# download and extract the insect detection dataset
+insect_dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
+pdx.utils.download_and_decompress(insect_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#composedyolov3transforms
+train_transforms = transforms.ComposedYOLOv3Transforms(mode='train', shape=[608, 608])
+eval_transforms = transforms.ComposedYOLOv3Transforms(mode='eval', shape=[608, 608])
+
+# 定义训练和验证所用的数据集
+# API说明: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/detection.html#vocdetection
+train_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/train_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.VOCDetection(
+    data_dir='insect_det',
+    file_list='insect_det/val_list.txt',
+    label_list='insect_det/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/yolov3_darknet53/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/detection.html#yolov3
+num_classes = len(train_dataset.labels)
+model = pdx.det.YOLOv3(num_classes=num_classes, backbone='DarkNet53')
+model.train(
+    num_epochs=270,
+    train_dataset=train_dataset,
+    train_batch_size=8,
+    eval_dataset=eval_dataset,
+    learning_rate=0.000125,
+    lr_decay_epochs=[210, 240],
+    save_dir='output/yolov3_darknet53',
+    use_vdl=True)

+ 51 - 0
new_tutorials/train/segmentation/deeplabv3p.py

@@ -0,0 +1,51 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddlex as pdx
+from paddlex.seg import transforms
+
+# download and extract the optic disc segmentation dataset
+optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
+pdx.utils.download_and_decompress(optic_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/seg_transforms.html#composedsegtransforms
+train_transforms = transforms.ComposedSegTransforms(mode='train', train_crop_size=[769, 769])
+eval_transforms = transforms.ComposedSegTransforms(mode='eval')
+
+train_transforms.add_augmenters([
+    transforms.RandomRotate()
+])
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/semantic_segmentation.html#segdataset
+train_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/train_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/val_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/deeplab/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#deeplabv3p
+num_classes = len(train_dataset.labels)
+model = pdx.seg.DeepLabv3p(num_classes=num_classes)
+model.train(
+    num_epochs=40,
+    train_dataset=train_dataset,
+    train_batch_size=4,
+    eval_dataset=eval_dataset,
+    learning_rate=0.01,
+    save_dir='output/deeplab',
+    use_vdl=True)

+ 47 - 0
new_tutorials/train/segmentation/hrnet.py

@@ -0,0 +1,47 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddlex as pdx
+from paddlex.seg import transforms
+
+# download and extract the optic disc segmentation dataset
+optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
+pdx.utils.download_and_decompress(optic_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/seg_transforms.html#composedsegtransforms
+train_transforms = transforms.ComposedSegTransforms(mode='train', train_crop_size=[769, 769])
+eval_transforms = transforms.ComposedSegTransforms(mode='eval')
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/semantic_segmentation.html#segdataset
+train_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/train_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/val_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/hrnet/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#hrnet
+num_classes = len(train_dataset.labels)
+model = pdx.seg.HRNet(num_classes=num_classes)
+model.train(
+    num_epochs=20,
+    train_dataset=train_dataset,
+    train_batch_size=4,
+    eval_dataset=eval_dataset,
+    learning_rate=0.01,
+    save_dir='output/hrnet',
+    use_vdl=True)

+ 47 - 0
new_tutorials/train/segmentation/unet.py

@@ -0,0 +1,47 @@
+import os
+# use GPU card 0
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+import paddlex as pdx
+from paddlex.seg import transforms
+
+# download and extract the optic disc segmentation dataset
+optic_dataset = 'https://bj.bcebos.com/paddlex/datasets/optic_disc_seg.tar.gz'
+pdx.utils.download_and_decompress(optic_dataset, path='./')
+
+# define transforms for training and evaluation
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/seg_transforms.html#composedsegtransforms
+train_transforms = transforms.ComposedSegTransforms(mode='train', train_crop_size=[769, 769])
+eval_transforms = transforms.ComposedSegTransforms(mode='eval')
+
+# define the training and evaluation datasets
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/datasets/semantic_segmentation.html#segdataset
+train_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/train_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=train_transforms,
+    shuffle=True)
+eval_dataset = pdx.datasets.SegDataset(
+    data_dir='optic_disc_seg',
+    file_list='optic_disc_seg/val_list.txt',
+    label_list='optic_disc_seg/labels.txt',
+    transforms=eval_transforms)
+
+# initialize the model and start training
+# training metrics can be inspected with VisualDL
+# launch VisualDL with: visualdl --logdir output/unet/vdl_log --port 8001
+# then open http://0.0.0.0:8001 in a browser
+# (0.0.0.0 works for local access; for a remote server, use that machine's IP)
+
+# API docs: https://paddlex.readthedocs.io/zh_CN/latest/apis/models/semantic_segmentation.html#unet
+num_classes = len(train_dataset.labels)
+model = pdx.seg.UNet(num_classes=num_classes)
+model.train(
+    num_epochs=20,
+    train_dataset=train_dataset,
+    train_batch_size=4,
+    eval_dataset=eval_dataset,
+    learning_rate=0.01,
+    save_dir='output/unet',
+    use_vdl=True)

+ 1 - 1
paddlex/__init__.py

@@ -53,4 +53,4 @@ log_level = 2
 
 from . import interpret
 
-__version__ = '1.0.4'
+__version__ = '1.0.5'

+ 32 - 9
paddlex/cv/models/classifier.py

@@ -52,8 +52,7 @@ class BaseClassifier(BaseAPI):
             input_shape = [
                 None, 3, self.fixed_input_shape[1], self.fixed_input_shape[0]
             ]
-            image = fluid.data(
-                dtype='float32', shape=input_shape, name='image')
+            image = fluid.data(dtype='float32', shape=input_shape, name='image')
         else:
             image = fluid.data(
                 dtype='float32', shape=[None, 3, None, None], name='image')
@@ -81,7 +80,8 @@ class BaseClassifier(BaseAPI):
             del outputs['loss']
         return inputs, outputs
 
-    def default_optimizer(self, learning_rate, lr_decay_epochs, lr_decay_gamma,
+    def default_optimizer(self, learning_rate, warmup_steps, warmup_start_lr,
+                          lr_decay_epochs, lr_decay_gamma,
                           num_steps_each_epoch):
         boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
         values = [
@@ -90,6 +90,24 @@ class BaseClassifier(BaseAPI):
         ]
         lr_decay = fluid.layers.piecewise_decay(
             boundaries=boundaries, values=values)
+        if warmup_steps > 0:
+            if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
+                logging.error(
+                    "In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
+                    exit=False)
+                logging.error(
+                    "See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
+                    exit=False)
+                logging.error(
+                    "warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
+                    format(lr_decay_epochs[0] * num_steps_each_epoch,
+                           warmup_steps // num_steps_each_epoch))
+
+            lr_decay = fluid.layers.linear_lr_warmup(
+                learning_rate=lr_decay,
+                warmup_steps=warmup_steps,
+                start_lr=warmup_start_lr,
+                end_lr=learning_rate)
         optimizer = fluid.optimizer.Momentum(
             lr_decay,
             momentum=0.9,
@@ -107,6 +125,8 @@ class BaseClassifier(BaseAPI):
               pretrain_weights='IMAGENET',
               optimizer=None,
               learning_rate=0.025,
+              warmup_steps=0,
+              warmup_start_lr=0.0,
               lr_decay_epochs=[30, 60, 90],
               lr_decay_gamma=0.1,
               use_vdl=False,
@@ -129,6 +149,8 @@ class BaseClassifier(BaseAPI):
             optimizer (paddle.fluid.optimizer): Optimizer. When None, the default optimizer is used:
                 the fluid.layers.piecewise_decay decay strategy with the fluid.optimizer.Momentum method.
             learning_rate (float): Initial learning rate of the default optimizer. Defaults to 0.025.
+            warmup_steps (int): Number of steps over which the learning rate rises from warmup_start_lr to the configured learning_rate. Defaults to 0.
+            warmup_start_lr (float): Starting value of the learning rate during the warmup phase. Defaults to 0.0.
             lr_decay_epochs (list): Epochs at which the default optimizer's learning rate decays. Defaults to [30, 60, 90].
             lr_decay_gamma (float): Decay rate of the default optimizer's learning rate. Defaults to 0.1.
             use_vdl (bool): Whether to use VisualDL for visualization. Defaults to False.
@@ -149,6 +171,8 @@ class BaseClassifier(BaseAPI):
             num_steps_each_epoch = train_dataset.num_samples // train_batch_size
             optimizer = self.default_optimizer(
                 learning_rate=learning_rate,
+                warmup_steps=warmup_steps,
+                warmup_start_lr=warmup_start_lr,
                 lr_decay_epochs=lr_decay_epochs,
                 lr_decay_gamma=lr_decay_gamma,
                 num_steps_each_epoch=num_steps_each_epoch)
@@ -193,8 +217,7 @@ class BaseClassifier(BaseAPI):
           tuple (metrics, eval_details): When return_details is True, an additional dict is returned,
               with keys 'true_labels' and 'pred_scores': the ground-truth class ids and the per-class prediction scores.
         """
-        self.arrange_transforms(
-            transforms=eval_dataset.transforms, mode='eval')
+        self.arrange_transforms(transforms=eval_dataset.transforms, mode='eval')
         data_generator = eval_dataset.generator(
             batch_size=batch_size, drop_last=False)
         k = min(5, self.num_classes)
@@ -206,9 +229,8 @@ class BaseClassifier(BaseAPI):
                 self.test_prog).with_data_parallel(
                     share_vars_from=self.parallel_train_prog)
         batch_size_each_gpu = self._get_single_card_bs(batch_size)
-        logging.info(
-            "Start to evaluating(total_samples={}, total_steps={})...".format(
-                eval_dataset.num_samples, total_steps))
+        logging.info("Start to evaluating(total_samples={}, total_steps={})...".
+                     format(eval_dataset.num_samples, total_steps))
         for step, data in tqdm.tqdm(
                 enumerate(data_generator()), total=total_steps):
             images = np.array([d[0] for d in data]).astype('float32')
@@ -264,7 +286,8 @@ class BaseClassifier(BaseAPI):
             im = self.test_transforms(img_file)
         result = self.exe.run(self.test_prog,
                               feed={'image': im},
-                              fetch_list=list(self.test_outputs.values()))
+                              fetch_list=list(self.test_outputs.values()),
+                              use_program_cache=True)
         pred_label = np.argsort(result[0][0])[::-1][:true_topk]
         res = [{
             'category_id': l,

+ 4 - 2
paddlex/cv/models/deeplabv3p.py

@@ -337,7 +337,8 @@ class DeepLabv3p(BaseAPI):
             for d in data:
                 padding_label = np.zeros(
                     (1, im_h, im_w)).astype('int64') + self.ignore_index
-                padding_label[:, :im_h, :im_w] = d[1]
+                _, label_h, label_w = d[1].shape
+                padding_label[:, :label_h, :label_w] = d[1]
                 labels.append(padding_label)
             labels = np.array(labels)
 
@@ -398,7 +399,8 @@ class DeepLabv3p(BaseAPI):
         im = np.expand_dims(im, axis=0)
         result = self.exe.run(self.test_prog,
                               feed={'image': im},
-                              fetch_list=list(self.test_outputs.values()))
+                              fetch_list=list(self.test_outputs.values()),
+                              use_program_cache=True)
         pred = result[0]
         pred = np.squeeze(pred).astype('uint8')
         logit = result[1]

+ 16 - 10
paddlex/cv/models/faster_rcnn.py

@@ -138,8 +138,16 @@ class FasterRCNN(BaseAPI):
                           lr_decay_epochs, lr_decay_gamma,
                           num_steps_each_epoch):
         if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
-            raise Exception("warmup_steps should less than {}".format(
-                lr_decay_epochs[0] * num_steps_each_epoch))
+            logging.error(
+                "In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
+                exit=False)
+            logging.error(
+                "See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
+                exit=False)
+            logging.error(
+                "warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
+                format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps
+                       // num_steps_each_epoch))
         boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
         values = [(lr_decay_gamma**i) * learning_rate
                   for i in range(len(lr_decay_epochs) + 1)]
@@ -282,8 +290,7 @@ class FasterRCNN(BaseAPI):
                 eval_details is a dict with key 'bbox': a list of prediction results, each consisting of the image id,
                 the predicted box class id, box coordinates and box score; and key 'gt': information about the ground-truth boxes.
         """
-        self.arrange_transforms(
-            transforms=eval_dataset.transforms, mode='eval')
+        self.arrange_transforms(transforms=eval_dataset.transforms, mode='eval')
         if metric is None:
             if hasattr(self, 'metric') and self.metric is not None:
                 metric = self.metric
@@ -302,14 +309,12 @@ class FasterRCNN(BaseAPI):
             logging.warning(
                 "Faster RCNN supports batch_size=1 only during evaluating, so batch_size is forced to be set to 1."
             )
-        dataset = eval_dataset.generator(
-            batch_size=batch_size, drop_last=False)
+        dataset = eval_dataset.generator(batch_size=batch_size, drop_last=False)
 
         total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
         results = list()
-        logging.info(
-            "Start to evaluating(total_samples={}, total_steps={})...".format(
-                eval_dataset.num_samples, total_steps))
+        logging.info("Start to evaluating(total_samples={}, total_steps={})...".
+                     format(eval_dataset.num_samples, total_steps))
         for step, data in tqdm.tqdm(enumerate(dataset()), total=total_steps):
             images = np.array([d[0] for d in data]).astype('float32')
             im_infos = np.array([d[1] for d in data]).astype('float32')
@@ -389,7 +394,8 @@ class FasterRCNN(BaseAPI):
                                    'im_shape': im_shape
                                },
                                fetch_list=list(self.test_outputs.values()),
-                               return_numpy=False)
+                               return_numpy=False,
+                               use_program_cache=True)
         res = {
             k: (np.array(v), v.recursive_sequence_lengths())
             for k, v in zip(list(self.test_outputs.keys()), outputs)

+ 16 - 10
paddlex/cv/models/mask_rcnn.py

@@ -97,8 +97,16 @@ class MaskRCNN(FasterRCNN):
                           lr_decay_epochs, lr_decay_gamma,
                           num_steps_each_epoch):
         if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
-            raise Exception("warmup_step should less than {}".format(
-                lr_decay_epochs[0] * num_steps_each_epoch))
+            logging.error(
+                "In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
+                exit=False)
+            logging.error(
+                "See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
+                exit=False)
+            logging.error(
+                "warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
+                format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps
+                       // num_steps_each_epoch))
         boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
         values = [(lr_decay_gamma**i) * learning_rate
                   for i in range(len(lr_decay_epochs) + 1)]
@@ -244,8 +252,7 @@ class MaskRCNN(FasterRCNN):
                 the predicted box coordinates and box score; key 'mask': a list of predicted region results, each consisting of the image id,
                 the predicted region class id, region coordinates and region score; and key 'gt': information about the ground-truth boxes and regions.
         """
-        self.arrange_transforms(
-            transforms=eval_dataset.transforms, mode='eval')
+        self.arrange_transforms(transforms=eval_dataset.transforms, mode='eval')
         if metric is None:
             if hasattr(self, 'metric') and self.metric is not None:
                 metric = self.metric
@@ -266,9 +273,8 @@ class MaskRCNN(FasterRCNN):
 
         total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
         results = list()
-        logging.info(
-            "Start to evaluating(total_samples={}, total_steps={})...".format(
-                eval_dataset.num_samples, total_steps))
+        logging.info("Start to evaluating(total_samples={}, total_steps={})...".
+                     format(eval_dataset.num_samples, total_steps))
         for step, data in tqdm.tqdm(
                 enumerate(data_generator()), total=total_steps):
             images = np.array([d[0] for d in data]).astype('float32')
@@ -310,8 +316,7 @@ class MaskRCNN(FasterRCNN):
                     zip(['bbox_map', 'segm_map'],
                         [ap_stats[0][1], ap_stats[1][1]]))
             else:
-                metrics = OrderedDict(
-                    zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
+                metrics = OrderedDict(zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
         elif metric == 'COCO':
             if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
                                                                   np.ndarray):
@@ -357,7 +362,8 @@ class MaskRCNN(FasterRCNN):
                                    'im_shape': im_shape
                                },
                                fetch_list=list(self.test_outputs.values()),
-                               return_numpy=False)
+                               return_numpy=False,
+                               use_program_cache=True)
         res = {
             k: (np.array(v), v.recursive_sequence_lengths())
             for k, v in zip(list(self.test_outputs.keys()), outputs)

+ 38 - 8
paddlex/cv/models/slim/prune.py

@@ -66,16 +66,15 @@ def sensitivity(program,
             progress = "%.2f%%" % (progress * 100)
             logging.info(
                 "Total evaluate iters={}, current={}, progress={}, eta={}".
-                format(
-                    total_evaluate_iters, current_iter, progress,
-                    seconds_to_hms(
-                        int(cost * (total_evaluate_iters - current_iter)))),
+                format(total_evaluate_iters, current_iter, progress,
+                       seconds_to_hms(
+                           int(cost * (total_evaluate_iters - current_iter)))),
                 use_color=True)
             current_iter += 1
 
             pruner = Pruner()
-            logging.info("sensitive - param: {}; ratios: {}".format(
-                name, ratio))
+            logging.info("sensitive - param: {}; ratios: {}".format(name,
+                                                                    ratio))
             pruned_program, param_backup, _ = pruner.prune(
                 program=graph.program,
                 scope=scope,
@@ -87,8 +86,8 @@ def sensitivity(program,
                 param_backup=True)
             pruned_metric = eval_func(pruned_program)
             loss = (baseline - pruned_metric) / baseline
-            logging.info("pruned param: {}; {}; loss={}".format(
-                name, ratio, loss))
+            logging.info("pruned param: {}; {}; loss={}".format(name, ratio,
+                                                                loss))
 
             sensitivities[name][ratio] = loss
 
@@ -116,6 +115,21 @@ def channel_prune(program, prune_names, prune_ratios, place, only_graph=False):
     Returns:
         paddle.fluid.Program: The pruned Program.
     """
+    prog_var_shape_dict = {}
+    for var in program.list_vars():
+        try:
+            prog_var_shape_dict[var.name] = var.shape
+        except Exception:
+            pass
+    index = 0
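+    # if a ratio would prune away every output channel of a parameter, back it off in 0.1 steps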
+    for param, ratio in zip(prune_names, prune_ratios):
+        origin_num = prog_var_shape_dict[param][0]
+        pruned_num = int(round(origin_num * ratio))
+        while origin_num == pruned_num:
+            ratio -= 0.1
+            pruned_num = int(round(origin_num * (ratio)))
+            prune_ratios[index] = ratio
+        index += 1
     scope = fluid.global_scope()
     pruner = Pruner()
     program, _, _ = pruner.prune(
@@ -221,6 +235,9 @@ def cal_params_sensitivities(model, save_file, eval_dataset, batch_size=8):
 
             where ``weight_0`` is a convolution kernel name, and ``sensitivities['weight_0']`` is a dict whose keys are pruning ratios and whose values are sensitivities.
     """
+    if os.path.exists(save_file):
+        os.remove(save_file)
+
     prune_names = get_prune_params(model)
 
     def eval_for_prune(program):
@@ -284,6 +301,19 @@ def cal_model_size(program, place, sensitivities_file, eval_metric_loss=0.05):
     """
     prune_params_ratios = get_params_ratios(sensitivities_file,
                                             eval_metric_loss)
+    prog_var_shape_dict = {}
+    for var in program.list_vars():
+        try:
+            prog_var_shape_dict[var.name] = var.shape
+        except Exception:
+            pass
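+    # back off any ratio that would prune away every output channel of a parameter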
+    for param, ratio in prune_params_ratios.items():
+        origin_num = prog_var_shape_dict[param][0]
+        pruned_num = int(round(origin_num * ratio))
+        while origin_num == pruned_num:
+            ratio -= 0.1
+            pruned_num = int(round(origin_num * (ratio)))
+            prune_params_ratios[param] = ratio
     prune_program = channel_prune(
         program,
         list(prune_params_ratios.keys()),

+ 1 - 1
paddlex/cv/models/slim/prune_config.py

@@ -162,7 +162,7 @@ def get_prune_params(model):
                 continue
             prune_names.append(param.name)
     elif model_type.startswith("MobileNetV3"):
-        if model_type == 'MobileNetV3_small':
+        if model_type.startswith('MobileNetV3_small'):
             expand_prune_id = [3, 4]
         else:
             expand_prune_id = [2, 3, 4, 8, 9, 11]

+ 15 - 8
paddlex/cv/models/yolo_v3.py

@@ -128,8 +128,16 @@ class YOLOv3(BaseAPI):
                           lr_decay_epochs, lr_decay_gamma,
                           num_steps_each_epoch):
         if warmup_steps > lr_decay_epochs[0] * num_steps_each_epoch:
-            raise Exception("warmup_steps should less than {}".format(
-                lr_decay_epochs[0] * num_steps_each_epoch))
+            logging.error(
+                "In function train(), parameters should satisfy: warmup_steps <= lr_decay_epochs[0]*num_samples_in_train_dataset",
+                exit=False)
+            logging.error(
+                "See this doc for more information: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/appendix/parameters.md#notice",
+                exit=False)
+            logging.error(
+                "warmup_steps should less than {} or lr_decay_epochs[0] greater than {}, please modify 'lr_decay_epochs' or 'warmup_steps' in train function".
+                format(lr_decay_epochs[0] * num_steps_each_epoch, warmup_steps
+                       // num_steps_each_epoch))
         boundaries = [b * num_steps_each_epoch for b in lr_decay_epochs]
         values = [(lr_decay_gamma**i) * learning_rate
                   for i in range(len(lr_decay_epochs) + 1)]
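The replaced guard enforces that LR warmup finishes before the first piecewise-decay boundary; otherwise the two schedules would overlap. A worked sketch of the bookkeeping (all values assumed):

    num_steps_each_epoch = 1000    # e.g. num_samples // batch_size (assumed)
    lr_decay_epochs = [210, 240]
    lr_decay_gamma = 0.1
    learning_rate = 1.0 / 8000
    warmup_steps = 4000

    assert warmup_steps <= lr_decay_epochs[0] * num_steps_each_epoch  # the guard

    boundaries = [e * num_steps_each_epoch for e in lr_decay_epochs]  # [210000, 240000]
    values = [(lr_decay_gamma ** i) * learning_rate
              for i in range(len(lr_decay_epochs) + 1)]               # one LR per segment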
@@ -277,8 +285,7 @@ class YOLOv3(BaseAPI):
                 eval_details is a dict. The key 'bbox' maps to a list of predictions, each holding the image id,
                 predicted box category id, box coordinates, and box score; the key 'gt' holds ground-truth box information.
         """
-        self.arrange_transforms(
-            transforms=eval_dataset.transforms, mode='eval')
+        self.arrange_transforms(transforms=eval_dataset.transforms, mode='eval')
         if metric is None:
             if hasattr(self, 'metric') and self.metric is not None:
                 metric = self.metric
@@ -298,9 +305,8 @@ class YOLOv3(BaseAPI):
 
         data_generator = eval_dataset.generator(
             batch_size=batch_size, drop_last=False)
-        logging.info(
-            "Start to evaluating(total_samples={}, total_steps={})...".format(
-                eval_dataset.num_samples, total_steps))
+        logging.info("Start to evaluating(total_samples={}, total_steps={})...".
+                     format(eval_dataset.num_samples, total_steps))
         for step, data in tqdm.tqdm(
                 enumerate(data_generator()), total=total_steps):
             images = np.array([d[0] for d in data])
@@ -363,7 +369,8 @@ class YOLOv3(BaseAPI):
                                feed={'image': im,
                                      'im_size': im_size},
                                fetch_list=list(self.test_outputs.values()),
-                               return_numpy=False)
+                               return_numpy=False,
+                               use_program_cache=True)
         res = {
             k: (np.array(v), v.recursive_sequence_lengths())
             for k, v in zip(list(self.test_outputs.keys()), outputs)
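Passing use_program_cache=True lets the fluid Executor reuse the prepared program across repeated run() calls, which pays off in a per-image predict loop like the one above. Sketch of the call pattern (test_prog, fetch_targets, and batches are assumed names):

    for im, im_size in batches:
        outputs = exe.run(test_prog,
                          feed={'image': im, 'im_size': im_size},
                          fetch_list=fetch_targets,
                          return_numpy=False,
                          use_program_cache=True)  # skip re-preparing the program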

+ 6 - 1
paddlex/cv/transforms/cls_transforms.py

@@ -18,6 +18,7 @@ import random
 import os.path as osp
 import numpy as np
 from PIL import Image, ImageEnhance
+import paddlex.utils.logging as logging
 
 
 class ClsTransform:
@@ -96,7 +97,11 @@ class Compose(ClsTransform):
         if not isinstance(augmenters, list):
             raise Exception(
                 "augmenters should be list type in func add_augmenters()")
-        self.transforms = augmenters + self.transforms.transforms
+        transform_names = [type(x).__name__ for x in self.transforms]
+        for aug in augmenters:
+            if type(aug).__name__ in transform_names:
+                logging.error("{} is already in ComposedTransforms, need to remove it from add_augmenters().".format(type(aug).__name__))
+        self.transforms = augmenters + self.transforms
 
 
 class RandomCrop(ClsTransform):
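The duplicate check added to Compose.add_augmenters() above (and repeated verbatim in det_transforms.py and seg_transforms.py below) rejects an augmenter whose class already appears in the pipeline. A minimal sketch with stand-in transform classes (the real code reports via logging.error, which now exits):

    class RandomHorizontalFlip:   # stand-in transform classes for illustration
        pass

    class Normalize:
        pass

    transforms = [RandomHorizontalFlip(), Normalize()]
    augmenters = [Normalize()]

    existing = [type(t).__name__ for t in transforms]
    for aug in augmenters:
        if type(aug).__name__ in existing:
            raise ValueError("{} is already in ComposedTransforms".format(
                type(aug).__name__))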

+ 8 - 3
paddlex/cv/transforms/det_transforms.py

@@ -27,6 +27,7 @@ from PIL import Image, ImageEnhance
 from .imgaug_support import execute_imgaug
 from .ops import *
 from .box_utils import *
+import paddlex.utils.logging as logging
 
 
 class DetTransform:
@@ -156,7 +157,11 @@ class Compose(DetTransform):
         if not isinstance(augmenters, list):
             raise Exception(
                 "augmenters should be list type in func add_augmenters()")
-        self.transforms = augmenters + self.transforms.transforms
+        transform_names = [type(x).__name__ for x in self.transforms]
+        for aug in augmenters:
+            if type(aug).__name__ in transform_names:
+                logging.error("{} is already in ComposedTransforms, need to remove it from add_augmenters().".format(type(aug).__name__))
+        self.transforms = augmenters + self.transforms
 
 
 class ResizeByShort(DetTransform):
@@ -1282,7 +1287,7 @@ class ComposedRCNNTransforms(Compose):
         super(ComposedRCNNTransforms, self).__init__(transforms)
 
 
-class ComposedYOLOTransforms(Compose):
+class ComposedYOLOv3Transforms(Compose):
     """YOLOv3模型的图像预处理流程,具体如下,
         训练阶段:
         1. 在前mixup_epoch轮迭代中,使用MixupImage策略,见https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#mixupimage
@@ -1337,4 +1342,4 @@ class ComposedYOLOTransforms(Compose):
                     target_size=width, interp='CUBIC'), Normalize(
                         mean=mean, std=std)
             ]
-        super(ComposedYOLOTransforms, self).__init__(transforms)
+        super(ComposedYOLOv3Transforms, self).__init__(transforms)
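Renaming ComposedYOLOTransforms to ComposedYOLOv3Transforms breaks imports of the old name; if backward compatibility were wanted, a one-line alias (not part of this diff) would cover it:

    ComposedYOLOTransforms = ComposedYOLOv3Transforms  # hypothetical back-compat alias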

+ 7 - 2
paddlex/cv/transforms/seg_transforms.py

@@ -21,6 +21,7 @@ import numpy as np
 from PIL import Image
 import cv2
 from collections import OrderedDict
+import paddlex.utils.logging as logging
 
 
 class SegTransform:
@@ -112,7 +113,11 @@ class Compose(SegTransform):
         if not isinstance(augmenters, list):
             raise Exception(
                 "augmenters should be list type in func add_augmenters()")
-        self.transforms = augmenters + self.transforms.transforms
+        transform_names = [type(x).__name__ for x in self.transforms]
+        for aug in augmenters:
+            if type(aug).__name__ in transform_names:
+                logging.error("{} is already in ComposedTransforms, need to remove it from add_augmenters().".format(type(aug).__name__))
+        self.transforms = augmenters + self.transforms
 
 
 class RandomHorizontalFlip(SegTransform):
@@ -1127,6 +1132,6 @@ class ComposedSegTransforms(Compose):
             ]
         else:
             # transforms for evaluation / prediction
-            transforms = [Resize(512), Normalize(mean=mean, std=std)]
+            transforms = [Normalize(mean=mean, std=std)]
 
         super(ComposedSegTransforms, self).__init__(transforms)
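With Resize(512) dropped, evaluation and prediction now run at each image's native resolution. Users who still need a fixed input size can add the resize back explicitly; a sketch using this module's own classes (mean/std values assumed):

    mean = [0.5, 0.5, 0.5]
    std = [0.5, 0.5, 0.5]
    eval_transforms = Compose([
        Resize(512),                    # optional again, no longer the default
        Normalize(mean=mean, std=std),
    ])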

+ 1 - 1
paddlex/slim.py

@@ -31,4 +31,4 @@ def export_quant_model(model,
         batch_size=batch_size,
         batch_num=batch_num,
         save_dir=save_dir,
-        cache_dir='./temp')
+        cache_dir=cache_dir)
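The one-line fix makes export_quant_model() honor the caller's cache_dir instead of the hard-coded './temp'. A hypothetical call (arguments beyond those visible in the hunk are assumptions):

    import paddlex

    paddlex.slim.export_quant_model(
        model, eval_dataset,
        batch_size=2, batch_num=10,
        save_dir='./quant_model',
        cache_dir='/tmp/quant_cache')   # previously replaced silently by './temp'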

+ 8 - 8
paddlex/utils/logging.py

@@ -29,13 +29,11 @@ def log(level=2, message="", use_color=False):
     current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
     if paddlex.log_level >= level:
         if use_color:
-            print("\033[1;31;40m{} [{}]\t{}\033[0m".format(
-                current_time, levels[level],
-                message).encode("utf-8").decode("latin1"))
+            print("\033[1;31;40m{} [{}]\t{}\033[0m".format(current_time, levels[
+                level], message).encode("utf-8").decode("latin1"))
         else:
-            print(
-                "{} [{}]\t{}".format(current_time, levels[level],
-                                     message).encode("utf-8").decode("latin1"))
+            print("{} [{}]\t{}".format(current_time, levels[level], message)
+                  .encode("utf-8").decode("latin1"))
         sys.stdout.flush()
 
 
@@ -47,9 +45,11 @@ def info(message="", use_color=False):
     log(level=2, message=message, use_color=use_color)
 
 
-def warning(message="", use_color=False):
+def warning(message="", use_color=True):
     log(level=1, message=message, use_color=use_color)
 
 
-def error(message="", use_color=False):
+def error(message="", use_color=True, exit=True):
     log(level=0, message=message, use_color=use_color)
+    if exit:
+        sys.exit(-1)
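warning() and error() now default to colored output, and error() terminates the process unless exit=False is passed; that is what lets yolo_v3.py above chain several error lines before the final fatal one. Sketch of the new contract:

    import paddlex.utils.logging as logging

    logging.error("first line of a multi-line report", exit=False)
    logging.error("second line, still running", exit=False)
    logging.error("fatal: aborting now")   # logs, then sys.exit(-1)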

+ 1 - 1
setup.py

@@ -19,7 +19,7 @@ long_description = "PaddleX. An end-to-end deep learning model development toolkit
 
 setuptools.setup(
     name="paddlex",
-    version='1.0.4',
+    version='1.0.5',
     author="paddlex",
     author_email="paddlex@baidu.com",
     description=long_description,