Browse Source

Merge remote-tracking branch 'paddle/develop' into develop
update

syyxsxx 5 years ago
parent
commit
6487d3f138

BIN
.DS_Store


+ 16 - 0
deploy/cpp/src/transforms.cpp

@@ -197,6 +197,22 @@ void Transforms::Init(const YAML::Node& transforms_node, bool to_rgb) {
   to_rgb_ = to_rgb;
   for (const auto& item : transforms_node) {
     std::string name = item.begin()->first.as<std::string>();
+    if (name == "ArrangeClassifier") {
+      continue;
+    }
+    if (name == "ArrangeSegmenter") {
+      continue;
+    }
+    if (name == "ArrangeFasterRCNN") {
+      continue;
+    }
+    if (name == "ArrangeMaskRCNN") {
+      continue;
+    }
+    if (name == "ArrangeYOLOv3") {
+      continue;
+    }
+
     std::shared_ptr<Transform> transform = CreateTransform(name);
     transform->Init(item.begin()->second);
     transforms_.push_back(transform);

+ 0 - 0
deploy/openvino/python/convertor.py → deploy/openvino/python/converter.py


BIN
docs/.DS_Store


+ 0 - 1
docs/apis/models/semantic_segmentation.md

@@ -138,7 +138,6 @@ overlap_tile_predict(self, img_file, tile_size=[512, 512], pad_size=[64, 64], ba
 > >
 > > - **dict**: 包含关键字'label_map'和'score_map', 'label_map'存储预测结果灰度图,像素值表示对应的类别,'score_map'存储各类别的概率,shape=(h, w, num_classes)。
 
-
 ## paddlex.seg.UNet
 
 ```python

+ 77 - 16
docs/appendix/how_to_offline_run.md

@@ -4,25 +4,12 @@ PaddleX在模型训练时,存在以下两种情况需要进行联网下载
 > 1.训练模型时,用户没有配置自定义的预训练模型权重`pretrain_weights`,此时PaddleX会自动联网下载在标准数据集上的预训练模型;
 > 2.模型裁剪训练时,用户没有配置自定义的参数敏感度信息文件`sensitivities_file`,并将`sensitivities_file`配置成了'DEFAULT'字符串,此时PaddleX会自动联网下载模型在标准数据集上计算得到的参数敏感度信息文件。
 
-
-用户可以通过本文最末的代码先下载好所有的预训练模型到指定的目录(在代码中我们下载到了`/home/work/paddlex_pretrain`目录)
-
-在训练模型时,需要配置paddlex全局预训练模型路径,将此路径指定到存放了所有预训练模型的路径即可,如下示例代码
-```
-import paddlex as pdx
-# 在import paddlex指定全局的预训练模型路径
-# 模型训练时会跳过下载的过程,使用该目录下载好的模型
-pdx.pretrain_dir = '/home/work/paddlex_pretrain'
-```
-按上方式配置后,之后即可进行无联网模型训练
-
-
-### 下载所有预训练模型代码
-
-> 所有预训练模型下载解压后约为7.5G
+## PaddleX Python API离线训练
+> 通过如下代码先下载好PaddleX的所有预训练模型,下载完共约7.5G  
 ```
 from paddlex.cv.models.utils.pretrain_weights import image_pretrain
 from paddlex.cv.models.utils.pretrain_weights import coco_pretrain
+from paddlex.cv.models.utils.pretrain_weights import cityscapes_pretrain
 import paddlehub as hub
 
 save_dir = '/home/work/paddlex_pretrain'
@@ -30,4 +17,78 @@ for name, url in image_pretrain.items():
     hub.download(name, save_dir)
 for name, url in coco_pretrain.items():
     hub.download(name, save_dir)
+for name, url in cityscapes_pretrain.items():
+    hub.download(name, save_dir)
+```
+
+用户在可联网的机器上,执行如上代码,所有的预训练模型将会下载至指定的`save_dir`(代码示例中为`/home/work/paddlex_pretrain`),之后再通过Python代码使用PaddleX训练代码时,只需要在import paddlex的同时,配置如下参数,模型在训练时便会优先在此目录下寻找已经下载好的预训练模型。
+```
+import paddlex as pdx
+pdx.pretrain_dir = '/home/work/paddlex_pretrain'
+```
+
+## PaddleX GUI离线训练
+> PaddleX GUI在打开后,需要用户设定工作空间,假设当前用户设定的工作空间为`D:\PaddleX_Workspace`,为了离线训练,用户需手动下载如下所有文件(下载后无需再做解压操作)至`D:\PaddleX_Workspace\pretrain`目录,之后在训练模型时,便不再需要联网  
+```
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet18_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet34_pretrained.tar
+http://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar
+http://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_vd_ssld_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet101_vd_ssld_pretrained.tar
+http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x0_5_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x2_0_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x0_25_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV2_x1_5_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_small_x1_0_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_small_x1_0_ssld_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV3_large_x1_0_ssld_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/DarkNet53_ImageNet1k_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet121_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet161_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/DenseNet201_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_cos_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/Xception41_deeplab_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/Xception65_deeplab_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/ShuffleNetV2_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W18_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W30_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W32_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W40_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W44_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W48_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W60_C_pretrained.tar
+https://paddle-imagenet-models-name.bj.bcebos.com/HRNet_W64_C_pretrained.tar
+http://paddle-imagenet-models-name.bj.bcebos.com/AlexNet_pretrained.tar
+https://paddlemodels.bj.bcebos.com/object_detection/yolov3_darknet.tar
+https://paddlemodels.bj.bcebos.com/object_detection/yolov3_mobilenet_v1.tar
+https://bj.bcebos.com/paddlex/models/yolov3_mobilenet_v3.tar
+https://paddlemodels.bj.bcebos.com/object_detection/yolov3_r34.tar
+https://paddlemodels.bj.bcebos.com/object_detection/yolov3_r50vd_dcn.tar
+https://bj.bcebos.com/paddlex/pretrained_weights/faster_rcnn_r18_fpn_1x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r50_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r50_vd_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r101_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_r101_vd_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/faster_rcnn_hrnetv2p_w18_2x.tar
+https://bj.bcebos.com/paddlex/pretrained_weights/mask_rcnn_r18_fpn_1x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/mask_rcnn_r50_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/mask_rcnn_r50_vd_fpn_2x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/mask_rcnn_r101_fpn_1x.tar
+https://paddlemodels.bj.bcebos.com/object_detection/mask_rcnn_r101_vd_fpn_1x.tar
+https://bj.bcebos.com/paddlex/pretrained_weights/mask_rcnn_hrnetv2p_w18_2x.tar
+https://paddleseg.bj.bcebos.com/models/unet_coco_v3.tgz
+https://bj.bcebos.com/v1/paddleseg/deeplab_mobilenet_x1_0_coco.tgz
+https://paddleseg.bj.bcebos.com/models/xception65_coco.tgz
+https://paddlemodels.bj.bcebos.com/object_detection/ppyolo_2x.pdparams
+https://paddleseg.bj.bcebos.com/models/deeplabv3p_mobilenetv3_large_cityscapes.tar.gz
+https://paddleseg.bj.bcebos.com/models/mobilenet_cityscapes.tgz
+https://paddleseg.bj.bcebos.com/models/xception65_bn_cityscapes.tgz
+https://paddleseg.bj.bcebos.com/models/hrnet_w18_bn_cityscapes.tgz
+https://paddleseg.bj.bcebos.com/models/fast_scnn_cityscape.tar
 ```

+ 1 - 1
docs/appendix/index.rst

@@ -11,4 +11,4 @@
    metrics.md
    interpret.md
    parameters.md
-
+   how_to_offline_run.md

+ 2 - 2
docs/appendix/parameters.md

@@ -28,8 +28,8 @@ num_epochs是模型训练迭代的总轮数(模型对训练集全部样本过一
 ## 5.参数设定时的约束
 根据上述几个参数,可以了解到学习率的变化分为WarmUp热身阶段和Decay衰减阶段,
> - Warmup热身阶段:随着训练迭代,学习率从较低的值逐渐线性增长至设定的值,以step为单位
-> - Decay衰减阶段:随着训练迭代,学习率逐步衰减,如每次衰减为之前的0.1, 以epoch为单位
-> step与epoch的关系:1个epoch由多个step组成,例如训练样本有800张图像,`train_batch_size`为8, 那么每个epoch都要完整用这800张图片训一次模型,而每个epoch总共包含800//8即100个step
+> - Decay衰减阶段:随着训练迭代,学习率逐步衰减,如每次衰减为之前的0.1, 以epoch为单位  
+> - step与epoch的关系:1个epoch由多个step组成,例如训练样本有800张图像,`train_batch_size`为8, 那么每个epoch都要完整用这800张图片训一次模型,而每个epoch总共包含800//8即100个step
 
 在PaddleX中,约束warmup必须在Decay之前结束,因此各参数设置需要满足下面条件
 ```

+ 11 - 11
docs/change_log.md

@@ -2,23 +2,23 @@
 
 **v1.2.0** 2020.09.07
 - 模型更新
-  > - 新增目标检测模型PPYOLO[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-ppyolo)
-  > - FasterRCNN、MaskRCNN、YOLOv3、DeepLabv3p等模型新增内置COCO数据集预训练模型
-  > - 目标检测模型FasterRCNN和MaskRCNN新增backbone HRNet_W18[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn)
-  > - 语义分割模型DeepLabv3p新增backbone MobileNetV3_large_ssld[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p)
+  > - 新增产业最实用目标检测模型PP-YOLO,深入考虑产业应用对精度速度的双重诉求,COCO数据集精度45.2%,Tesla V100预测速度72.9FPS。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-ppyolo)
+  > - FasterRCNN、MaskRCNN、YOLOv3、DeepLabv3p等模型新增内置COCO数据集预训练模型,适用于小数据集的微调训练。
+  > - 目标检测模型FasterRCNN和MaskRCNN新增backbone HRNet_W18,适用于对细节预测要求较高的应用场景。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn)
+  > - 语义分割模型DeepLabv3p新增backbone MobileNetV3_large_ssld,模型体积9.3MB,Cityscapes数据集精度仍保持有73.28%。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p)
 
 - 模型部署更新
-  > - 新增模型通过OpenVINO的部署方案[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/deploy/openvino/index.html)
-  > - 新增模型在树莓派上的部署方案[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/deploy/raspberry/index.html)
-  > - 优化PaddleLite Android部署的数据预处理和后处理代码性能
-  > - 优化Paddle服务端C++代码部署代码,增加use_mkl等参数,通过mkldnn显著提升模型在CPU上的预测性能
+  > - 新增模型通过OpenVINO预测加速的部署方案,CPU上相比mkldnn加速库预测速度提升1.5~2倍左右。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/deploy/openvino/index.html)
+  > - 新增模型在树莓派上的部署方案,进一步丰富边缘侧的部署方案。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/deploy/raspberry/index.html)
+  > - 优化PaddleLite Android部署的数据预处理和后处理代码性能,预处理速度提升10倍左右,后处理速度提升4倍左右。
+  > - 优化Paddle服务端C++代码部署代码,增加use_mkl等参数,CPU上相比未开启mkldnn预测速度提升10~50倍左右。
 
 - 产业案例更新
-  > - 新增RGB图像遥感分割案例[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/examples/remote_sensing.html)
-  > - 新增多通道遥感分割案例[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/examples/multi-channel_remote_sensing/README.html)
+  > - 新增大尺寸RGB图像遥感分割案例,提供滑动窗口预测接口,不仅能避免显存不足的发生,而且能通过配置重叠程度消除最终预测结果中各窗口拼接处的裂痕感。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/examples/remote_sensing.html)
+  > - 新增多通道遥感影像分割案例,打通语义分割任务对任意通道数量的数据分析、模型训练、模型部署全流程。[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/examples/multi-channel_remote_sensing/README.html)
 
 - 其它
-  > - 新增数据集切分功能,支持通过命令行切分ImageNet、PascalVOC、MSCOCO和语义分割数据集[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/data/format/classification.html#id2)
+  > - 新增数据集切分功能,支持通过命令行一键切分ImageNet、PascalVOC、MSCOCO和语义分割数据集[详情链接](https://paddlex.readthedocs.io/zh_CN/develop/data/format/classification.html#id2)
 
 **v1.1.0** 2020.07.12
 

+ 1 - 1
docs/deploy/openvino/export_openvino_model.md

@@ -22,7 +22,7 @@ paddlex --export_inference --model_dir=/path/to/paddle_model --save_dir=./infere
 ```
 cd /root/projects/python
 
-python convertor.py --model_dir /path/to/inference_model --save_dir /path/to/openvino_model --fixed_input_shape [w,h]
+python converter.py --model_dir /path/to/inference_model --save_dir /path/to/openvino_model --fixed_input_shape [w,h]
 ```
 **转换成功后会在save_dir下出现后缀名为.xml、.bin、.mapping三个文件**  
 转换参数说明如下:

+ 135 - 0
docs/deploy/server/cpp/api.md

@@ -0,0 +1,135 @@
+# C++代码接口说明
+
+## 头文件
+`include/paddlex/paddlex.h`
+
+## 类 PaddleX::Model
+
+模型类,用于加载PaddleX训练的模型。
+
+### 模型加载
+```
+PaddleX::Model::Init(const std::string& model_dir,
+                     bool use_gpu = false,
+                     bool use_trt = false,
+                     bool use_mkl = true,
+                     int mkl_thread_num = 4,
+                     int gpu_id = 0,
+                     std::string key = "",
+                     bool use_ir_optim = true)
+```
+
+**参数**  
+- model_dir: 模型目录路径
+- use_gpu: 是否使用gpu预测
+- use_trt: 是否使用TensorRT
+- use_mkl: 是否使用MKLDNN加速模型在CPU上的预测性能
+- mkl_thread_num: 使用MKLDNN时,线程数量
+- gpu_id: 使用gpu的id号
+- key: 模型解密密钥,此参数用于加载加密的PaddleX模型时使用
+- use_ir_optim: 是否开启图优化,以加速模型预测
+
+**返回值**
+- 返回true或false,表示模型是否加载成功
+
+### 模型预测推断
+
+**分类模型单张图片预测**
+```
+PaddleX::Model::predict(const cv::Mat& im, ClsResult* result)
+```
+**分类模型多张图片批预测**
+```
+PaddleX::Model::predict(const std::vector<cv::Mat>& im_batch, std::vector<ClsResult>* results)
+```
+**目标检测/实例分割模型单张图片预测**
+```
+PaddleX::Model::predict(const cv::Mat& im, DetResult* result)
+```
+**目标检测/实例分割模型多张图片批预测**
+```
+PaddleX::Model::predict(const std::vector<cv::Mat>& im_batch, std::vector<DetResult>* results)
+```
+**语义分割模型单张图片预测**
+```
+PaddleX::Model::predict(const cv::Mat& im, SegResult* result)
+```
+**语义分割模型多张图片批预测**
+```
+PaddleX::Model::predict(const std::vector<cv::Mat>& im_batch, std::vector<SegResult>* results)
+```
+各接口返回值为true或false,用于表示是否预测成功
+
+预测时,需传入cv::Mat结构体,结构需与如下示例代码加载的结构体一致
+```
+cv::Mat im = cv::imread('test.jpg', 1);
+```
+当使用批预测时,注意会将传入的vector中所有数据作为一个批次进行预测,因此vector越大,所需要使用的GPU显存会越高。
+
+预测时,同时传入ClsResult/DetResult/SegResult结构体,用于存放模型的预测结果,各结构体说明如下
+```
+// 分类模型预测结果
+class ClsResult {
+ public:
+  int category_id; // 类别id
+  std::string category; // 类别标签
+  float score; // 预测置信度
+  std::string type = "cls";
+}
+
+// 目标检测/实例分割模型预测结果
+class DetResult {
+ public:
+  std::vector<Box> boxes; // 预测结果中的各个目标框
+  int mask_resolution; 
+  std::string type = "det";
+}
+
+// 语义分割模型预测结果
+class SegResult : public BaseResult {
+ public:
+  Mask<int64_t> label_map; // 预测分割中各像素的类别
+  Mask<float> score_map; // 预测分割中各像素的置信度
+  std::string type = "seg";
+}
+
+struct Box {
+  int category_id; // 类别id
+  std::string category; // 类别标签
+  float score; // 置信度
+  std::vector<float> coordinate; // 4个元素值,表示xmin, ymin, width, height
+  Mask<int> mask; // 实例分割中,用于表示Box内的分割结果
+}
+
+struct Mask {
+  std::vector<T> data; // 分割中的label map或score map
+  std::vector<int> shape; // 表示分割图的shape
+}
+```
+
+## 预测结果可视化
+
+### 目标检测/实例分割结果可视化
+```
+PaddleX::Visualize(const cv::Mat& img, // 原图
+				   const DetResult& result, // 预测结果
+				   const std::map<int, std::string>& labels // 各类别信息<id, label_name>
+				  )
+```
+返回cv::Mat结构体,即为可视化后的结果
+
+### 语义分割结果可视化
+```
+PaddleX::Visualize(const cv::Mat& img, // 原图
+				   const SegResult& result, // 预测结果
+                   const std::map<int, std::string>& labels // 各类别信息<id, label_name>
+                  )
+```
+返回cv::Mat结构体,即为可视化后的结果
+
+
+## 代码示例
+
+- 图像分类 [PaddleX/deploy/cpp/demo/classifier.cpp](https://github.com/PaddlePaddle/PaddleX/blob/develop/deploy/cpp/demo/classifier.cpp)  
+- 目标检测/实例分割 [PaddleX/deploy/cpp/demo/detector.cpp](https://github.com/PaddlePaddle/PaddleX/blob/develop/deploy/cpp/demo/detector.cpp)
+- 语义分割 [PaddleX/deploy/cpp/demo/segmenter.cpp](https://github.com/PaddlePaddle/PaddleX/blob/develop/deploy/cpp/demo/segmenter.cpp)

+ 1 - 0
docs/deploy/server/cpp/index.rst

@@ -8,3 +8,4 @@ C++部署
 
    windows.md
    linux.md
+   api.md

BIN
docs/gui/.DS_Store


BIN
examples/remote_sensing/images/visualize_compare.png


+ 1 - 1
paddlex/__init__.py

@@ -29,7 +29,7 @@ from . import det
 from . import seg
 from . import cls
 from . import slim
-from . import convertor
+from . import converter
 from . import tools
 from . import deploy
 

+ 1 - 1
paddlex/command.py

@@ -168,7 +168,7 @@ def main():
             logging.error(
                 "paddlex --export_inference --model_dir model_path --save_dir infer_model"
             )
-        pdx.convertor.export_onnx_model(model, args.save_dir, args.onnx_opset)
+        pdx.converter.export_onnx_model(model, args.save_dir, args.onnx_opset)
 
     if args.data_conversion:
         assert args.source is not None, "--source should be defined while converting dataset"

+ 6 - 8
paddlex/convertor.py → paddlex/converter.py

@@ -37,19 +37,17 @@ def export_onnx_model(model, save_dir, opset_version=10):
             "Only image classifier models, detection models(YOLOv3) and semantic segmentation models(except FastSCNN) are supported to export to ONNX"
         )
     try:
-        import x2paddle
-        if x2paddle.__version__ < '0.7.4':
-            logging.error("You need to upgrade x2paddle >= 0.7.4")
+        import paddle2onnx 
     except:
         logging.error(
-            "You need to install x2paddle first, pip install x2paddle>=0.7.4")
+            "You need to install paddle2onnx first, pip install paddle2onnx")
+    import paddle2onnx as p2o
     if opset_version == 10 and model.__class__.__name__ == "YOLOv3":
         logging.warning(
-            "Export for openVINO by default, the output of multiclass_nms exported to onnx will contains background. If you need onnx completely consistent with paddle, please use X2Paddle to export"
+            "Export for openVINO by default, the output of multiclass_nms exported to onnx will contains background. If you need onnx completely consistent with paddle, please use paddle2onnx to export"
         )
-        x2paddle.op_mapper.paddle2onnx.opset10.paddle_custom_layer.multiclass_nms.multiclass_nms = multiclass_nms_for_openvino
-    from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
-    mapper = PaddleOpMapper()
+        p2o.op_mapper.opset9.paddle_custom_layer.multiclass_nms.multiclass_nms = multiclass_nms_for_openvino
+    mapper = p2o.PaddleOpMapper()
     mapper.convert(
         model.test_prog,
         save_dir,

+ 1 - 1
paddlex/cv/models/load_model.py

@@ -69,7 +69,7 @@ def load_model(model_dir, fixed_input_shape=None):
             if status == "Prune":
                 from .slim.prune import update_program
                 model.test_prog = update_program(model.test_prog, model_dir,
-                                                 model.places[0])
+                                                 model.places[0], scope=model_scope)
             import pickle
             with open(osp.join(model_dir, 'model.pdparams'), 'rb') as f:
                 load_dict = pickle.load(f)

+ 12 - 8
paddlex/cv/models/slim/prune.py

@@ -104,7 +104,7 @@ def sensitivity(program,
     return sensitivities
 
 
-def channel_prune(program, prune_names, prune_ratios, place, only_graph=False):
+def channel_prune(program, prune_names, prune_ratios, place, only_graph=False, scope=None):
     """通道裁剪。
 
     Args:
@@ -134,7 +134,8 @@ def channel_prune(program, prune_names, prune_ratios, place, only_graph=False):
             pruned_num = int(round(origin_num * (ratio)))
             prune_ratios[index] = ratio
         index += 1
-    scope = fluid.global_scope()
+    if scope is None:
+        scope = fluid.global_scope()
     pruner = Pruner()
     program, _, _ = pruner.prune(
         program,
@@ -175,12 +176,12 @@ def prune_program(model, prune_params_ratios=None):
         prune_params_ratios[prune_name] for prune_name in prune_names
     ]
     model.train_prog = channel_prune(train_prog, prune_names, prune_ratios,
-                                     place)
+                                     place, scope=model.scope)
     model.test_prog = channel_prune(
-        eval_prog, prune_names, prune_ratios, place, only_graph=True)
+        eval_prog, prune_names, prune_ratios, place, only_graph=True, scope=model.scope)
 
 
-def update_program(program, model_dir, place):
+def update_program(program, model_dir, place, scope=None):
     """根据裁剪信息更新Program和参数。
 
     Args:
@@ -197,10 +198,12 @@ def update_program(program, model_dir, place):
         shapes = yaml.load(f.read(), Loader=yaml.Loader)
     for param, shape in shapes.items():
         graph.var(param).set_shape(shape)
+    if scope is None:
+        scope = fluid.global_scope()
     for block in program.blocks:
         for param in block.all_parameters():
             if param.name in shapes:
-                param_tensor = fluid.global_scope().find_var(
+                param_tensor = scope.find_var(
                     param.name).get_tensor()
                 param_tensor.set(
                     np.zeros(list(shapes[param.name])).astype('float32'),
@@ -293,7 +296,7 @@ def get_params_ratios(sensitivities_file, eval_metric_loss=0.05):
     return params_ratios
 
 
-def cal_model_size(program, place, sensitivities_file, eval_metric_loss=0.05):
+def cal_model_size(program, place, sensitivities_file, eval_metric_loss=0.05, scope=None):
     """在可容忍的精度损失下,计算裁剪后模型大小相对于当前模型大小的比例。
 
     Args:
@@ -326,7 +329,8 @@ def cal_model_size(program, place, sensitivities_file, eval_metric_loss=0.05):
         list(prune_params_ratios.keys()),
         list(prune_params_ratios.values()),
         place,
-        only_graph=True)
+        only_graph=True,
+        scope=scope)
     origin_size = 0
     new_size = 0
     for var in program.list_vars():

+ 34 - 4
paddlex/cv/models/slim/prune_config.py

@@ -171,10 +171,14 @@ def get_prune_params(model):
             model_type.startswith('ShuffleNetV2'):
         for block in program.blocks:
             for param in block.all_parameters():
-                pd_var = fluid.global_scope().find_var(param.name)
-                pd_param = pd_var.get_tensor()
-                if len(np.array(pd_param).shape) == 4:
-                    prune_names.append(param.name)
+                pd_var = model.scope.find_var(param.name)
+                try:
+                    pd_param = pd_var.get_tensor()
+                    if len(np.array(pd_param).shape) == 4:
+                        prune_names.append(param.name)
+                except Exception as e:
+                    print("None Tensor Name: ", param.name)
+                    print("Error message: {}".format(e))
         if model_type == 'AlexNet':
             prune_names.remove('conv5_weights')
         if model_type == 'ShuffleNetV2':
@@ -285,11 +289,37 @@ def get_prune_params(model):
                 prune_names.remove(i)
 
     elif model_type.startswith('DeepLabv3p'):
+        if model_type.lower() == "deeplabv3p_mobilenetv3_large_x1_0_ssld":
+            params_not_prune = [
+                'last_1x1_conv_weights', 'conv14_se_2_weights',
+                'conv16_depthwise_weights', 'conv13_depthwise_weights',
+                'conv15_se_2_weights', 'conv2_depthwise_weights',
+                'conv6_depthwise_weights', 'conv8_depthwise_weights',
+                'fc_weights', 'conv3_depthwise_weights', 'conv7_se_2_weights',
+                'conv16_expand_weights', 'conv16_se_2_weights',
+                'conv10_depthwise_weights', 'conv11_depthwise_weights',
+                'conv15_expand_weights', 'conv5_expand_weights',
+                'conv15_depthwise_weights', 'conv14_depthwise_weights',
+                'conv12_se_2_weights', 'conv1_weights',
+                'conv13_expand_weights', 'conv_last_weights',
+                'conv12_depthwise_weights', 'conv13_se_2_weights',
+                'conv12_expand_weights', 'conv5_depthwise_weights',
+                'conv6_se_2_weights', 'conv10_expand_weights',
+                'conv9_depthwise_weights', 'conv6_expand_weights',
+                'conv5_se_2_weights', 'conv14_expand_weights',
+                'conv4_depthwise_weights', 'conv7_expand_weights',
+                'conv7_depthwise_weights', 'encoder/aspp0/weights',
+                'decoder/merge/weights', 'encoder/image_pool/weights',
+                'decoder/weights'
+            ]
         for param in program.global_block().all_parameters():
             if 'weight' not in param.name:
                 continue
             if 'dwise' in param.name or 'depthwise' in param.name or 'logit' in param.name:
                 continue
+            if model_type.lower() == "deeplabv3p_mobilenetv3_large_x1_0_ssld":
+                if param.name in params_not_prune:
+                    continue
             prune_names.append(param.name)
         params_not_prune = [
             'xception_{}/exit_flow/block2/separable_conv3/pointwise/weights'.

+ 1 - 1
paddlex/cv/models/slim/visualize.py

@@ -42,7 +42,7 @@ def visualize(model, sensitivities_file, save_dir='./'):
     y = list()
     for loss_thresh in tqdm.tqdm(list(np.arange(0.05, 1, 0.05))):
         prune_ratio = 1 - cal_model_size(
-            program, place, sensitivities_file, eval_metric_loss=loss_thresh)
+            program, place, sensitivities_file, eval_metric_loss=loss_thresh, scope=model.scope)
         x.append(prune_ratio)
         y.append(loss_thresh)
     plt.plot(x, y, color='green', linewidth=0.5, marker='o', markersize=3)

+ 10 - 7
paddlex/cv/models/utils/pretrain_weights.py

@@ -202,11 +202,11 @@ def get_pretrain_weights(flag, class_name, backbone, save_dir):
         assert backbone in image_pretrain, "There is not ImageNet pretrain weights for {}, you may try COCO.".format(
             backbone)
 
-        #        if backbone == 'AlexNet':
-        #            url = image_pretrain[backbone]
-        #            fname = osp.split(url)[-1].split('.')[0]
-        #            paddlex.utils.download_and_decompress(url, path=new_save_dir)
-        #            return osp.join(new_save_dir, fname)
+        if getattr(paddlex, 'gui_mode', False):
+            url = image_pretrain[backbone]
+            fname = osp.split(url)[-1].split('.')[0]
+            paddlex.utils.download_and_decompress(url, path=new_save_dir)
+            return osp.join(new_save_dir, fname)
         try:
             logging.info(
                 "Connecting PaddleHub server to get pretrain weights...")
@@ -241,8 +241,11 @@ def get_pretrain_weights(flag, class_name, backbone, save_dir):
         elif flag == 'CITYSCAPES':
             url = cityscapes_pretrain[backbone]
         fname = osp.split(url)[-1].split('.')[0]
-        #        paddlex.utils.download_and_decompress(url, path=new_save_dir)
-        #        return osp.join(new_save_dir, fname)
+
+        if getattr(paddlex, 'gui_mode', False):
+            paddlex.utils.download_and_decompress(url, path=new_save_dir)
+            return osp.join(new_save_dir, fname)
+
         try:
             logging.info(
                 "Connecting PaddleHub server to get pretrain weights...")

+ 3 - 1
paddlex/cv/models/utils/visualize.py

@@ -93,7 +93,9 @@ def visualize_segmentation(image,
     if abs(weight) < 1e-5:
         vis_result = pseudo_img
     else:
-        vis_result = cv2.addWeighted(im, weight, pseudo_img, 1 - weight, 0)
+        vis_result = cv2.addWeighted(im, weight,
+                                     pseudo_img.astype('float32'), 1 - weight,
+                                     0)
 
     if save_dir is not None:
         if not os.path.exists(save_dir):

+ 10 - 4
paddlex/deploy.py

@@ -247,13 +247,16 @@ class Predictor:
                 [output_tensor.copy_to_cpu(), output_tensor_lod])
         return output_results
 
-    def predict(self, image, topk=1):
+    def predict(self, image, topk=1, transforms=None):
         """ 图片预测
 
             Args:
                 image(str|np.ndarray): 图像路径;或者是解码后的排列格式为(H, W, C)且类型为float32且为BGR格式的数组。
-                topk(int): 分类预测时使用,表示预测前topk的结果
+                topk(int): 分类预测时使用,表示预测前topk的结果。
+                transforms (paddlex.cls.transforms): 数据预处理操作。
         """
+        if transforms is not None:
+            self.transforms = transforms
         preprocessed_input = self.preprocess([image])
         model_pred = self.raw_predict(preprocessed_input)
         im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[
@@ -269,15 +272,18 @@ class Predictor:
 
         return results[0]
 
-    def batch_predict(self, image_list, topk=1):
+    def batch_predict(self, image_list, topk=1, transforms=None):
         """ 图片预测
 
             Args:
                 image_list(list|tuple): 对列表(或元组)中的图像同时进行预测,列表中的元素可以是图像路径
                     也可以是解码后的排列格式为(H,W,C)且类型为float32且为BGR格式的数组。
 
-                topk(int): 分类预测时使用,表示预测前topk的结果
+                topk(int): 分类预测时使用,表示预测前topk的结果。
+                transforms (paddlex.cls.transforms): 数据预处理操作。
         """
+        if transforms is not None:
+            self.transforms = transforms
         preprocessed_input = self.preprocess(image_list, self.thread_pool)
         model_pred = self.raw_predict(preprocessed_input)
         im_shape = None if 'im_shape' not in preprocessed_input else preprocessed_input[

+ 1 - 1
requirements.txt

@@ -6,6 +6,6 @@ pycocotools
 visualdl >= 2.0.0b
 paddleslim == 1.0.1
 shapely
-x2paddle
+paddle2onnx
 paddlepaddle-gpu
 opencv-python