update copyright

mamingjie-China, 5 years ago (commit 12db91d878)
87 changed files with 458 additions and 440 deletions
  1. deploy/lite/export_lite.py  +1 -1
  2. paddlex/cls.py  +1 -1
  3. paddlex/command.py  +1 -1
  4. paddlex/convertor.py  +1 -1
  5. paddlex/cv/__init__.py  +1 -1
  6. paddlex/cv/datasets/__init__.py  +1 -1
  7. paddlex/cv/datasets/coco.py  +1 -1
  8. paddlex/cv/datasets/dataset.py  +2 -3
  9. paddlex/cv/datasets/easydata_cls.py  +1 -1
  10. paddlex/cv/datasets/easydata_det.py  +23 -32
  11. paddlex/cv/datasets/easydata_seg.py  +7 -5
  12. paddlex/cv/datasets/imagenet.py  +1 -1
  13. paddlex/cv/datasets/shared_queue/__init__.py  +1 -1
  14. paddlex/cv/datasets/shared_queue/queue.py  +1 -1
  15. paddlex/cv/datasets/shared_queue/sharedmemory.py  +5 -5
  16. paddlex/cv/datasets/voc.py  +1 -1
  17. paddlex/cv/models/__init__.py  +1 -1
  18. paddlex/cv/models/base.py  +14 -9
  19. paddlex/cv/models/classifier.py  +1 -1
  20. paddlex/cv/models/deeplabv3p.py  +1 -1
  21. paddlex/cv/models/fast_scnn.py  +1 -1
  22. paddlex/cv/models/faster_rcnn.py  +1 -1
  23. paddlex/cv/models/hrnet.py  +1 -1
  24. paddlex/cv/models/load_model.py  +3 -3
  25. paddlex/cv/models/mask_rcnn.py  +8 -6
  26. paddlex/cv/models/slim/post_quantization.py  +6 -4
  27. paddlex/cv/models/slim/prune.py  +3 -3
  28. paddlex/cv/models/slim/prune_config.py  +1 -1
  29. paddlex/cv/models/slim/visualize.py  +2 -3
  30. paddlex/cv/models/unet.py  +4 -4
  31. paddlex/cv/models/utils/detection_eval.py  +19 -17
  32. paddlex/cv/models/utils/seg_eval.py  +7 -7
  33. paddlex/cv/models/utils/visualize.py  +4 -3
  34. paddlex/cv/models/yolo_v3.py  +1 -1
  35. paddlex/cv/nets/__init__.py  +1 -1
  36. paddlex/cv/nets/alexnet.py  +1 -1
  37. paddlex/cv/nets/backbone_utils.py  +1 -1
  38. paddlex/cv/nets/densenet.py  +4 -4
  39. paddlex/cv/nets/detection/__init__.py  +1 -1
  40. paddlex/cv/nets/detection/yolo_v3.py  +1 -1
  41. paddlex/cv/nets/mobilenet_v1.py  +1 -1
  42. paddlex/cv/nets/mobilenet_v2.py  +10 -11
  43. paddlex/cv/nets/resnet.py  +1 -1
  44. paddlex/cv/nets/segmentation/__init__.py  +1 -1
  45. paddlex/cv/nets/segmentation/deeplabv3p.py  +1 -1
  46. paddlex/cv/nets/segmentation/fast_scnn.py  +1 -1
  47. paddlex/cv/nets/segmentation/hrnet.py  +1 -1
  48. paddlex/cv/nets/segmentation/model_utils/__init__.py  +1 -1
  49. paddlex/cv/nets/segmentation/model_utils/libs.py  +5 -3
  50. paddlex/cv/nets/segmentation/model_utils/loss.py  +6 -5
  51. paddlex/cv/nets/segmentation/unet.py  +1 -1
  52. paddlex/cv/nets/shufflenet_v2.py  +12 -10
  53. paddlex/cv/nets/xception.py  +20 -21
  54. paddlex/cv/transforms/__init__.py  +1 -1
  55. paddlex/cv/transforms/box_utils.py  +3 -2
  56. paddlex/cv/transforms/cls_transforms.py  +1 -1
  57. paddlex/cv/transforms/det_transforms.py  +1 -1
  58. paddlex/cv/transforms/imgaug_support.py  +1 -1
  59. paddlex/cv/transforms/ops.py  +4 -3
  60. paddlex/cv/transforms/seg_transforms.py  +1 -1
  61. paddlex/cv/transforms/visualize.py  +168 -159
  62. paddlex/deploy.py  +1 -1
  63. paddlex/det.py  +1 -1
  64. paddlex/interpret/__init__.py  +1 -1
  65. paddlex/interpret/as_data_reader/__init__.py  +1 -1
  66. paddlex/interpret/as_data_reader/data_path_utils.py  +5 -4
  67. paddlex/interpret/as_data_reader/readers.py  +13 -8
  68. paddlex/interpret/core/__init__.py  +1 -1
  69. paddlex/interpret/core/_session_preparation.py  +4 -4
  70. paddlex/interpret/core/interpretation.py  +4 -4
  71. paddlex/interpret/core/interpretation_algorithms.py  +4 -4
  72. paddlex/interpret/core/normlime_base.py  +4 -4
  73. paddlex/interpret/interpretation_predict.py  +1 -1
  74. paddlex/interpret/visualize.py  +1 -1
  75. paddlex/seg.py  +1 -1
  76. paddlex/slim.py  +1 -1
  77. paddlex/utils/__init__.py  +4 -4
  78. paddlex/utils/logging.py  +4 -3
  79. paddlex/utils/save.py  +3 -3
  80. paddlex/utils/utils.py  +3 -1
  81. setup.py  +1 -1
  82. tutorials/compress/classification/cal_sensitivities_file.py  +4 -4
  83. tutorials/compress/classification/mobilenetv2.py  +6 -8
  84. tutorials/compress/detection/cal_sensitivities_file.py  +1 -1
  85. tutorials/compress/detection/yolov3_mobilenet.py  +7 -10
  86. tutorials/compress/segmentation/cal_sensitivities_file.py  +1 -1
  87. tutorials/compress/segmentation/unet.py  +4 -7
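
The diffs below are dominated by a one-word license-header fix ("All Rights Reserve." to "All Rights Reserved.") applied across the tree, plus re-wrapped long lines in several files. As a minimal sketch, not the authors' actual tooling, a sweep like this could be scripted from the repository root roughly as follows:

    # Illustrative only: rewrite the header typo in every tracked .py file.
    # The search/replace strings come straight from the headers in the diffs below.
    import pathlib

    OLD = "All Rights Reserve."
    NEW = "All Rights Reserved."

    for path in pathlib.Path(".").rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        if OLD in text:
            path.write_text(text.replace(OLD, NEW), encoding="utf-8")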

+ 1 - 1
deploy/lite/export_lite.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cls.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/command.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/convertor.py

@@ -1,4 +1,4 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/datasets/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/datasets/coco.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 2 - 3
paddlex/cv/datasets/dataset.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -218,14 +218,13 @@ def generate_minibatch(batch_data, label_padding_value=255):
             (im_c, max_shape[1], max_shape[2]), dtype=np.float32)
         padding_im[:, :im_h, :im_w] = data[0]
         if len(data) > 2:
-           # padding the image, label and insert 'padding' into `im_info` of segmentation during evaluating phase.
+            # padding the image, label and insert 'padding' into `im_info` of segmentation during evaluating phase.
             if len(data[1]) == 0 or 'padding' not in [
                     data[1][i][0] for i in range(len(data[1]))
             ]:
                 data[1].append(('padding', [im_h, im_w]))
             padding_batch.append((padding_im, data[1], data[2]))
 
-            
         elif len(data) > 1:
             if isinstance(data[1], np.ndarray) and len(data[1].shape) > 1:
                 # padding the image and label of segmentation during the training
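
The dataset.py hunk above touches generate_minibatch, which zero-pads every CHW image in a mini-batch to the largest height and width in that batch (and, for segmentation evaluation, records the padding in im_info). A minimal NumPy sketch of just the image-padding step, with assumed shapes:

    import numpy as np

    def pad_to_batch_max(batch_images, pad_value=0.0):
        # batch_images: list of float32 arrays shaped (C, H, W)
        max_h = max(im.shape[1] for im in batch_images)
        max_w = max(im.shape[2] for im in batch_images)
        padded = []
        for im in batch_images:
            c, h, w = im.shape
            canvas = np.full((c, max_h, max_w), pad_value, dtype=np.float32)
            canvas[:, :h, :w] = im  # original pixels sit in the top-left corner
            padded.append(canvas)
        return padded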

+ 1 - 1
paddlex/cv/datasets/easydata_cls.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 23 - 32
paddlex/cv/datasets/easydata_det.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@ from .voc import VOCDetection
 from .dataset import is_pic
 from .dataset import get_encoding
 
+
 class EasyDataDet(VOCDetection):
     """读取EasyDataDet格式的检测数据集,并对样本进行相应的处理。
 
@@ -41,7 +42,7 @@ class EasyDataDet(VOCDetection):
             线程和'process'进程两种方式。默认为'process'(Windows和Mac下会强制使用thread,该参数无效)。
         shuffle (bool): 是否需要对数据集中样本打乱顺序。默认为False。
     """
-    
+
     def __init__(self,
                  data_dir,
                  file_list,
@@ -60,12 +61,12 @@ class EasyDataDet(VOCDetection):
         self.file_list = list()
         self.labels = list()
         self._epoch = 0
-        
+
         annotations = {}
         annotations['images'] = []
         annotations['categories'] = []
         annotations['annotations'] = []
-        
+
         cname2cid = {}
         label_id = 1
         with open(label_list, encoding=get_encoding(label_list)) as fr:
@@ -80,7 +81,7 @@ class EasyDataDet(VOCDetection):
                 'id': v,
                 'name': k
             })
-            
+
         from pycocotools.mask import decode
         ct = 0
         ann_ct = 0
@@ -95,8 +96,8 @@ class EasyDataDet(VOCDetection):
                 if not osp.isfile(json_file):
                     continue
                 if not osp.exists(img_file):
-                    raise IOError(
-                        'The image file {} is not exist!'.format(img_file))
+                    raise IOError('The image file {} is not exist!'.format(
+                        img_file))
                 with open(json_file, mode='r', \
                           encoding=get_encoding(json_file)) as j:
                     json_info = json.load(j)
@@ -127,21 +128,15 @@ class EasyDataDet(VOCDetection):
                         mask = decode(mask_dict)
                         gt_poly[i] = self.mask2polygon(mask)
                     annotations['annotations'].append({
-                        'iscrowd':
-                        0,
-                        'image_id':
-                        int(im_id[0]),
+                        'iscrowd': 0,
+                        'image_id': int(im_id[0]),
                         'bbox': [x1, y1, x2 - x1 + 1, y2 - y1 + 1],
-                        'area':
-                        float((x2 - x1 + 1) * (y2 - y1 + 1)),
-                        'segmentation':
-                        [[x1, y1, x1, y2, x2, y2, x2, y1]] if gt_poly[i] is None else gt_poly[i],
-                        'category_id':
-                        cname2cid[cname],
-                        'id':
-                        ann_ct,
-                        'difficult':
-                        0
+                        'area': float((x2 - x1 + 1) * (y2 - y1 + 1)),
+                        'segmentation': [[x1, y1, x1, y2, x2, y2, x2, y1]]
+                        if gt_poly[i] is None else gt_poly[i],
+                        'category_id': cname2cid[cname],
+                        'id': ann_ct,
+                        'difficult': 0
                     })
                     ann_ct += 1
                 im_info = {
@@ -162,14 +157,10 @@ class EasyDataDet(VOCDetection):
                     self.file_list.append([img_file, voc_rec])
                     ct += 1
                     annotations['images'].append({
-                        'height':
-                        im_h,
-                        'width':
-                        im_w,
-                        'id':
-                        int(im_id[0]),
-                        'file_name':
-                        osp.split(img_file)[1]
+                        'height': im_h,
+                        'width': im_w,
+                        'id': int(im_id[0]),
+                        'file_name': osp.split(img_file)[1]
                     })
 
         if not len(self.file_list) > 0:
@@ -181,13 +172,13 @@ class EasyDataDet(VOCDetection):
         self.coco_gt = COCO()
         self.coco_gt.dataset = annotations
         self.coco_gt.createIndex()
-        
+
     def mask2polygon(self, mask):
         contours, hierarchy = cv2.findContours(
-            (mask).astype(np.uint8), cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
+            (mask).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
         segmentation = []
         for contour in contours:
             contour_list = contour.flatten().tolist()
             if len(contour_list) > 4:
                 segmentation.append(contour_list)
-        return segmentation
+        return segmentation
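
The mask2polygon method at the end of this hunk converts a binary mask into COCO-style polygon lists via cv2.findContours. A small self-contained sketch of the same idea, assuming the OpenCV 4.x API where findContours returns two values (matching the unpacking in the hunk):

    import numpy as np
    import cv2

    def mask_to_polygons(binary_mask):
        contours, _ = cv2.findContours(
            binary_mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        polygons = []
        for contour in contours:
            flat = contour.flatten().tolist()  # [x0, y0, x1, y1, ...]
            if len(flat) > 4:  # keep contours with at least 3 points
                polygons.append(flat)
        return polygons

    # Synthetic check: a filled square yields a single 4-corner polygon.
    demo = np.zeros((32, 32), dtype=np.uint8)
    demo[8:24, 8:24] = 1
    print(mask_to_polygons(demo))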

+ 7 - 5
paddlex/cv/datasets/easydata_seg.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@ from .dataset import Dataset
 from .dataset import get_encoding
 from .dataset import is_pic
 
+
 class EasyDataSeg(Dataset):
     """读取EasyDataSeg语义分割任务数据集,并对样本进行相应的处理。
 
@@ -67,7 +68,7 @@ class EasyDataSeg(Dataset):
                 cname2cid[line.strip()] = label_id
                 label_id += 1
                 self.labels.append(line.strip())
-                
+
         with open(file_list, encoding=get_encoding(file_list)) as f:
             for line in f:
                 img_file, json_file = [osp.join(data_dir, x) \
@@ -79,8 +80,8 @@ class EasyDataSeg(Dataset):
                 if not osp.isfile(json_file):
                     continue
                 if not osp.exists(img_file):
-                    raise IOError(
-                        'The image file {} is not exist!'.format(img_file))
+                    raise IOError('The image file {} is not exist!'.format(
+                        img_file))
                 with open(json_file, mode='r', \
                           encoding=get_encoding(json_file)) as j:
                     json_info = json.load(j)
@@ -97,7 +98,8 @@ class EasyDataSeg(Dataset):
                     mask_dict['counts'] = obj['mask'].encode()
                     mask = decode(mask_dict)
                     mask *= cid
-                    conflict_index = np.where(((lable_npy > 0) & (mask == cid)) == True)
+                    conflict_index = np.where(((lable_npy > 0) &
+                                               (mask == cid)) == True)
                     mask[conflict_index] = 0
                     lable_npy += mask
                 self.file_list.append([img_file, lable_npy])
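
The easydata_seg.py hunk above resolves overlapping instance masks: pixels that already carry a label (lable_npy > 0) are cleared from the incoming class mask before it is added. A tiny NumPy example with made-up arrays:

    import numpy as np

    label_npy = np.array([[1, 1, 0],
                          [0, 0, 0]], dtype=np.int64)   # already-labelled pixels
    mask = np.array([[2, 2, 2],
                     [0, 2, 0]], dtype=np.int64)        # new object, class id 2

    conflict_index = np.where((label_npy > 0) & (mask == 2))
    mask[conflict_index] = 0    # earlier labels win the overlap
    label_npy += mask
    print(label_npy)            # [[1 1 2]
                                #  [0 2 0]]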

+ 1 - 1
paddlex/cv/datasets/imagenet.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/datasets/shared_queue/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/datasets/shared_queue/queue.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 5 - 5
paddlex/cv/datasets/shared_queue/sharedmemory.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -278,8 +278,8 @@ class PageAllocator(object):
     def set_alloc_info(self, alloc_pos, used_pages):
         """ set allocating position to new value
         """
-        memcopy(self._base[4:12], struct.pack(
-            str('II'), alloc_pos, used_pages))
+        memcopy(self._base[4:12],
+                struct.pack(str('II'), alloc_pos, used_pages))
 
     def set_page_status(self, start, page_num, status):
         """ set pages from 'start' to 'end' with new same status 'status'
@@ -525,8 +525,8 @@ class SharedMemoryMgr(object):
             logger.info('destroy [%s]' % (self))
 
         if not self._released and not self._allocator.empty():
-            logger.debug(
-                'not empty when delete this SharedMemoryMgr[%s]' % (self))
+            logger.debug('not empty when delete this SharedMemoryMgr[%s]' %
+                         (self))
         else:
             self._released = True
 

+ 1 - 1
paddlex/cv/datasets/voc.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/models/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 14 - 9
paddlex/cv/models/base.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -246,8 +246,8 @@ class BaseAPI:
             logging.info(
                 "Load pretrain weights from {}.".format(pretrain_weights),
                 use_color=True)
-            paddlex.utils.utils.load_pretrain_weights(self.exe, self.train_prog,
-                                                      pretrain_weights, fuse_bn)
+            paddlex.utils.utils.load_pretrain_weights(
+                self.exe, self.train_prog, pretrain_weights, fuse_bn)
         # 进行裁剪
         if sensitivities_file is not None:
             import paddleslim
@@ -351,7 +351,9 @@ class BaseAPI:
         logging.info("Model saved in {}.".format(save_dir))
 
     def export_inference_model(self, save_dir):
-        test_input_names = [var.name for var in list(self.test_inputs.values())]
+        test_input_names = [
+            var.name for var in list(self.test_inputs.values())
+        ]
         test_outputs = list(self.test_outputs.values())
         with fluid.scope_guard(self.scope):
             if self.__class__.__name__ == 'MaskRCNN':
@@ -389,7 +391,8 @@ class BaseAPI:
 
         # 模型保存成功的标志
         open(osp.join(save_dir, '.success'), 'w').close()
-        logging.info("Model for inference deploy saved in {}.".format(save_dir))
+        logging.info("Model for inference deploy saved in {}.".format(
+            save_dir))
 
     def train_loop(self,
                    num_epochs,
@@ -516,11 +519,13 @@ class BaseAPI:
                         eta = ((num_epochs - i) * total_num_steps - step - 1
                                ) * avg_step_time
                     if time_eval_one_epoch is not None:
-                        eval_eta = (total_eval_times - i // save_interval_epochs
-                                    ) * time_eval_one_epoch
+                        eval_eta = (
+                            total_eval_times - i // save_interval_epochs
+                        ) * time_eval_one_epoch
                     else:
-                        eval_eta = (total_eval_times - i // save_interval_epochs
-                                    ) * total_num_steps_eval * avg_step_time
+                        eval_eta = (
+                            total_eval_times - i // save_interval_epochs
+                        ) * total_num_steps_eval * avg_step_time
                     eta_str = seconds_to_hms(eta + eval_eta)
 
                     logging.info(
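
The train_loop hunk above re-wraps the ETA arithmetic: the estimate is the remaining training steps times the measured per-step time, plus the remaining evaluation passes. A sketch with made-up numbers (every value below is an assumption, not a PaddleX default):

    avg_step_time = 0.25         # seconds per training step (assumed)
    total_num_steps = 1000       # steps per epoch (assumed)
    num_epochs, i, step = 20, 4, 100
    save_interval_epochs, total_eval_times = 2, 10
    time_eval_one_epoch = 30.0   # seconds per evaluation pass (assumed)

    eta = ((num_epochs - i) * total_num_steps - step - 1) * avg_step_time
    eval_eta = (total_eval_times - i // save_interval_epochs) * time_eval_one_epoch
    print(eta + eval_eta)        # estimated seconds until training finishes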

+ 1 - 1
paddlex/cv/models/classifier.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/models/deeplabv3p.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/models/fast_scnn.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/models/faster_rcnn.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/models/hrnet.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 3 - 3
paddlex/cv/models/load_model.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -103,8 +103,8 @@ def load_model(model_dir, fixed_input_shape=None):
                 model.model_type, info['Transforms'], info['BatchTransforms'])
             model.eval_transforms = copy.deepcopy(model.test_transforms)
         else:
-            model.test_transforms = build_transforms(model.model_type,
-                                                     info['Transforms'], to_rgb)
+            model.test_transforms = build_transforms(
+                model.model_type, info['Transforms'], to_rgb)
             model.eval_transforms = copy.deepcopy(model.test_transforms)
 
     if '_Attributes' in info:

+ 8 - 6
paddlex/cv/models/mask_rcnn.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -280,8 +280,9 @@ class MaskRCNN(FasterRCNN):
 
         total_steps = math.ceil(eval_dataset.num_samples * 1.0 / batch_size)
         results = list()
-        logging.info("Start to evaluating(total_samples={}, total_steps={})...".
-                     format(eval_dataset.num_samples, total_steps))
+        logging.info(
+            "Start to evaluating(total_samples={}, total_steps={})...".format(
+                eval_dataset.num_samples, total_steps))
         for step, data in tqdm.tqdm(
                 enumerate(data_generator()), total=total_steps):
             images = np.array([d[0] for d in data]).astype('float32')
@@ -325,7 +326,8 @@ class MaskRCNN(FasterRCNN):
                     zip(['bbox_map', 'segm_map'],
                         [ap_stats[0][1], ap_stats[1][1]]))
             else:
-                metrics = OrderedDict(zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
+                metrics = OrderedDict(
+                    zip(['bbox_map', 'segm_map'], [0.0, 0.0]))
         elif metric == 'COCO':
             if isinstance(ap_stats[0], np.ndarray) and isinstance(ap_stats[1],
                                                                   np.ndarray):
@@ -429,8 +431,8 @@ class MaskRCNN(FasterRCNN):
         if transforms is None:
             transforms = self.test_transforms
         im, im_resize_info, im_shape = FasterRCNN._preprocess(
-            img_file_list, transforms, self.model_type, self.__class__.__name__,
-            thread_num)
+            img_file_list, transforms, self.model_type,
+            self.__class__.__name__, thread_num)
 
         with fluid.scope_guard(self.scope):
             result = self.exe.run(self.test_prog,

+ 6 - 4
paddlex/cv/models/slim/post_quantization.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -80,7 +80,9 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
         self._support_activation_quantize_type = [
             'range_abs_max', 'moving_average_abs_max', 'abs_max'
         ]
-        self._support_weight_quantize_type = ['abs_max', 'channel_wise_abs_max']
+        self._support_weight_quantize_type = [
+            'abs_max', 'channel_wise_abs_max'
+        ]
         self._support_algo_type = ['KL', 'abs_max', 'min_max']
         self._support_quantize_op_type = \
             list(set(QuantizationTransformPass._supported_quantizable_op_type +
@@ -240,8 +242,8 @@ class PaddleXPostTrainingQuantization(PostTrainingQuantization):
                 '[Calculate weight] Weight_id={}/{}, time_each_weight={} s.'.
                 format(
                     str(ct),
-                    str(len(self._quantized_weight_var_name)), str(end -
-                                                                   start)))
+                    str(len(self._quantized_weight_var_name)),
+                    str(end - start)))
             ct += 1
 
         ct = 1

+ 3 - 3
paddlex/cv/models/slim/prune.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -288,8 +288,8 @@ def get_params_ratios(sensitivities_file, eval_metric_loss=0.05):
     if not osp.exists(sensitivities_file):
         raise Exception('The sensitivities file is not exists!')
     sensitivitives = paddleslim.prune.load_sensitivities(sensitivities_file)
-    params_ratios = paddleslim.prune.get_ratios_by_loss(
-        sensitivitives, eval_metric_loss)
+    params_ratios = paddleslim.prune.get_ratios_by_loss(sensitivitives,
+                                                        eval_metric_loss)
     return params_ratios
 
 

+ 1 - 1
paddlex/cv/models/slim/prune_config.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 2 - 3
paddlex/cv/models/slim/visualize.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -47,8 +47,7 @@ def visualize(model, sensitivities_file, save_dir='./'):
         y.append(loss_thresh)
     plt.plot(x, y, color='green', linewidth=0.5, marker='o', markersize=3)
     my_x_ticks = np.arange(
-        min(np.array(x)) - 0.01,
-        max(np.array(x)) + 0.01, 0.05)
+        min(np.array(x)) - 0.01, max(np.array(x)) + 0.01, 0.05)
     my_y_ticks = np.arange(0.05, 1, 0.05)
     plt.xticks(my_x_ticks, rotation=15, fontsize=8)
     plt.yticks(my_y_ticks, fontsize=8)

+ 4 - 4
paddlex/cv/models/unet.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 19 - 17
paddlex/cv/models/utils/detection_eval.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -158,8 +158,8 @@ def loadRes(coco_obj, anns):
         for id, ann in enumerate(anns):
             ann['id'] = id + 1
     elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
-        res.dataset['categories'] = copy.deepcopy(
-            coco_obj.dataset['categories'])
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
         for id, ann in enumerate(anns):
             bb = ann['bbox']
             x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
@@ -169,8 +169,8 @@ def loadRes(coco_obj, anns):
             ann['id'] = id + 1
             ann['iscrowd'] = 0
     elif 'segmentation' in anns[0]:
-        res.dataset['categories'] = copy.deepcopy(
-            coco_obj.dataset['categories'])
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
         for id, ann in enumerate(anns):
             # now only support compressed RLE format as segmentation results
             ann['area'] = maskUtils.area(ann['segmentation'])
@@ -179,8 +179,8 @@ def loadRes(coco_obj, anns):
             ann['id'] = id + 1
             ann['iscrowd'] = 0
     elif 'keypoints' in anns[0]:
-        res.dataset['categories'] = copy.deepcopy(
-            coco_obj.dataset['categories'])
+        res.dataset['categories'] = copy.deepcopy(coco_obj.dataset[
+            'categories'])
         for id, ann in enumerate(anns):
             s = ann['keypoints']
             x = s[0::3]
@@ -375,8 +375,8 @@ def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
             expand_bbox = expand_boxes(bbox, scale)
             expand_bbox = expand_bbox.astype(np.int32)
 
-            padded_mask = np.zeros((resolution + 2, resolution + 2),
-                                   dtype=np.float32)
+            padded_mask = np.zeros(
+                (resolution + 2, resolution + 2), dtype=np.float32)
 
             for j in range(num):
                 xmin, ymin, xmax, ymax = expand_bbox[j].tolist()
@@ -404,7 +404,8 @@ def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
                 im_mask[y0:y1, x0:x1] = resized_mask[(y0 - ymin):(y1 - ymin), (
                     x0 - xmin):(x1 - xmin)]
                 segm = mask_util.encode(
-                    np.array(im_mask[:, :, np.newaxis], order='F'))[0]
+                    np.array(
+                        im_mask[:, :, np.newaxis], order='F'))[0]
                 catid = clsid2catid[clsid]
                 segm['counts'] = segm['counts'].decode('utf8')
                 coco_res = {
@@ -571,8 +572,8 @@ def prune_zero_padding(gt_box, gt_label, difficult=None):
                 gt_box[i, 2] == 0 and gt_box[i, 3] == 0:
             break
         valid_cnt += 1
-    return (gt_box[:valid_cnt], gt_label[:valid_cnt],
-            difficult[:valid_cnt] if difficult is not None else None)
+    return (gt_box[:valid_cnt], gt_label[:valid_cnt], difficult[:valid_cnt]
+            if difficult is not None else None)
 
 
 def bbox_area(bbox, is_bbox_normalized):
@@ -694,8 +695,9 @@ class DetectionMAP(object):
         """
         mAP = 0.
         valid_cnt = 0
-        for id, (score_pos, count) in enumerate(
-                zip(self.class_score_poss, self.class_gt_counts)):
+        for id, (
+                score_pos, count
+        ) in enumerate(zip(self.class_score_poss, self.class_gt_counts)):
             if count == 0: continue
             if len(score_pos) == 0:
                 valid_cnt += 1

+ 7 - 7
paddlex/cv/models/utils/seg_eval.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,8 +24,8 @@ class ConfusionMatrix(object):
     """
 
     def __init__(self, num_classes=2, streaming=False):
-        self.confusion_matrix = np.zeros([num_classes, num_classes],
-                                         dtype='int64')
+        self.confusion_matrix = np.zeros(
+            [num_classes, num_classes], dtype='int64')
         self.num_classes = num_classes
         self.streaming = streaming
 
@@ -42,15 +42,15 @@ class ConfusionMatrix(object):
         pred = np.asarray(pred)[mask]
         one = np.ones_like(pred)
         # Accumuate ([row=label, col=pred], 1) into sparse matrix
-        spm = csr_matrix((one, (label, pred)),
-                         shape=(self.num_classes, self.num_classes))
+        spm = csr_matrix(
+            (one, (label, pred)), shape=(self.num_classes, self.num_classes))
         spm = spm.todense()
         self.confusion_matrix += spm
 
     def zero_matrix(self):
         """ Clear confusion matrix """
-        self.confusion_matrix = np.zeros([self.num_classes, self.num_classes],
-                                         dtype='int64')
+        self.confusion_matrix = np.zeros(
+            [self.num_classes, self.num_classes], dtype='int64')
 
     def mean_iou(self):
         iou_list = []
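
The ConfusionMatrix hunk above accumulates (label, pred) pairs through a SciPy CSR matrix, a compact way to bin many index pairs at once. A minimal standalone version of that accumulation step:

    import numpy as np
    from scipy.sparse import csr_matrix

    def accumulate_confusion(label, pred, num_classes):
        label = np.asarray(label).ravel()
        pred = np.asarray(pred).ravel()
        ones = np.ones_like(pred)
        # each (label, pred) pair contributes 1 to cell [label, pred]
        spm = csr_matrix((ones, (label, pred)),
                         shape=(num_classes, num_classes))
        return np.asarray(spm.todense(), dtype='int64')

    print(accumulate_confusion([0, 1, 1], [0, 1, 0], num_classes=2))
    # [[1 0]
    #  [1 1]]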

+ 4 - 3
paddlex/cv/models/utils/visualize.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -404,8 +404,9 @@ def draw_pr_curve(eval_details_file=None,
             plt.plot(x, sr_array, color=color, label=nm, linewidth=1)
         plt.legend(loc="lower left", fontsize=5)
         plt.savefig(
-            os.path.join(save_dir,
-                         "./{}_pr_curve(iou-{}).png".format(style, iou_thresh)),
+            os.path.join(
+                save_dir,
+                "./{}_pr_curve(iou-{}).png".format(style, iou_thresh)),
             dpi=800)
         plt.close()
 

+ 1 - 1
paddlex/cv/models/yolo_v3.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/alexnet.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/backbone_utils.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 4 - 4
paddlex/cv/nets/densenet.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 1 - 1
paddlex/cv/nets/detection/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/detection/yolo_v3.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/mobilenet_v1.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 10 - 11
paddlex/cv/nets/mobilenet_v2.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,10 +30,10 @@ class MobileNetV2:
         self.output_stride = output_stride
         self.end_points = end_points
         self.decode_points = decode_points
-        self.bottleneck_params_list = [(1, 16, 1, 1), (6, 24, 2, 2),
-                                       (6, 32, 3, 2), (6, 64, 4, 2),
-                                       (6, 96, 3, 1), (6, 160, 3, 2),
-                                       (6, 320, 1, 1)]
+        self.bottleneck_params_list = [
+            (1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2), (6, 64, 4, 2),
+            (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1)
+        ]
         self.modify_bottle_params(output_stride)
 
     def __call__(self, input):
@@ -104,11 +104,10 @@ class MobileNetV2:
             output = fluid.layers.pool2d(
                 input=output, pool_type='avg', global_pooling=True)
 
-            output = fluid.layers.fc(
-                input=output,
-                size=self.num_classes,
-                param_attr=ParamAttr(name='fc10_weights'),
-                bias_attr=ParamAttr(name='fc10_offset'))
+            output = fluid.layers.fc(input=output,
+                                     size=self.num_classes,
+                                     param_attr=ParamAttr(name='fc10_weights'),
+                                     bias_attr=ParamAttr(name='fc10_offset'))
         return output
 
     def modify_bottle_params(self, output_stride=None):
@@ -239,4 +238,4 @@ class MobileNetV2:
                 padding=1,
                 expansion_factor=t,
                 name=name + '_' + str(i + 1))
-        return last_residual_block, depthwise_output
+        return last_residual_block, depthwise_output

+ 1 - 1
paddlex/cv/nets/resnet.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/segmentation/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/segmentation/deeplabv3p.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/segmentation/fast_scnn.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/segmentation/hrnet.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/nets/segmentation/model_utils/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 5 - 3
paddlex/cv/nets/segmentation/model_utils/libs.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -148,7 +148,8 @@ def separate_conv(input,
         name=name_scope + 'weights',
         regularizer=fluid.regularizer.L2DecayRegularizer(
             regularization_coeff=0.0),
-        initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.33))
+        initializer=fluid.initializer.TruncatedNormal(
+            loc=0.0, scale=0.33))
     with scope('depthwise'):
         input = conv(
             input,
@@ -166,7 +167,8 @@ def separate_conv(input,
     param_attr = fluid.ParamAttr(
         name=name_scope + 'weights',
         regularizer=None,
-        initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.06))
+        initializer=fluid.initializer.TruncatedNormal(
+            loc=0.0, scale=0.06))
     with scope('pointwise'):
         input = conv(
             input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr)

+ 6 - 5
paddlex/cv/nets/segmentation/model_utils/loss.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -24,8 +24,9 @@ def softmax_with_loss(logit,
                       ignore_index=255):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
-        label, fluid.layers.assign(
-            np.array([num_classes - 1], dtype=np.int32)))
+        label,
+        fluid.layers.assign(np.array(
+            [num_classes - 1], dtype=np.int32)))
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     logit = fluid.layers.reshape(logit, [-1, num_classes])
     label = fluid.layers.reshape(label, [-1, 1])
@@ -60,8 +61,8 @@ def softmax_with_loss(logit,
                 'Expect weight is a list, string or Variable, but receive {}'.
                 format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(
-            label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot,
+                                                              weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,

+ 1 - 1
paddlex/cv/nets/segmentation/unet.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 12 - 10
paddlex/cv/nets/shufflenet_v2.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -96,11 +96,12 @@ class ShuffleNetV2():
                 pool_stride=1,
                 pool_padding=0,
                 pool_type='avg')
-            output = fluid.layers.fc(
-                input=output,
-                size=self.num_classes,
-                param_attr=ParamAttr(initializer=MSRA(), name='fc6_weights'),
-                bias_attr=ParamAttr(name='fc6_offset'))
+            output = fluid.layers.fc(input=output,
+                                     size=self.num_classes,
+                                     param_attr=ParamAttr(
+                                         initializer=MSRA(),
+                                         name='fc6_weights'),
+                                     bias_attr=ParamAttr(name='fc6_offset'))
         return output
 
     def conv_bn_layer(self,
@@ -122,7 +123,8 @@ class ShuffleNetV2():
             groups=num_groups,
             act=None,
             use_cudnn=use_cudnn,
-            param_attr=ParamAttr(initializer=MSRA(), name=name + '_weights'),
+            param_attr=ParamAttr(
+                initializer=MSRA(), name=name + '_weights'),
             bias_attr=False)
         out = int((input.shape[2] - 1) / float(stride) + 1)
         bn_name = name + '_bn'

+ 20 - 21
paddlex/cv/nets/xception.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -69,8 +69,7 @@ class Xception():
 
     def __call__(
             self,
-            input,
-    ):
+            input, ):
         self.stride = 2
         self.block_point = 0
         self.short_cuts = dict()
@@ -140,7 +139,8 @@ class Xception():
         param_attr = fluid.ParamAttr(
             name=name_scope + 'weights',
             regularizer=None,
-            initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.09))
+            initializer=fluid.initializer.TruncatedNormal(
+                loc=0.0, scale=0.09))
         with scope("entry_flow"):
             with scope("conv1"):
                 data = bn_relu(
@@ -178,10 +178,10 @@ class Xception():
             for i in range(block_num):
                 block_point = block_point + 1
                 with scope("block" + str(i + 1)):
-                    stride = strides[i] if check_stride(
-                        s * strides[i], output_stride) else 1
-                    data, short_cuts = self.xception_block(
-                        data, chns[i], [1, 1, stride])
+                    stride = strides[i] if check_stride(s * strides[i],
+                                                        output_stride) else 1
+                    data, short_cuts = self.xception_block(data, chns[i],
+                                                           [1, 1, stride])
                     s = s * stride
                     if check_points(block_point, self.decode_points):
                         self.short_cuts[block_point] = short_cuts[1]
@@ -205,8 +205,8 @@ class Xception():
             for i in range(block_num):
                 block_point = block_point + 1
                 with scope("block" + str(i + 1)):
-                    stride = strides[i] if check_stride(
-                        s * strides[i], output_stride) else 1
+                    stride = strides[i] if check_stride(s * strides[i],
+                                                        output_stride) else 1
                     data, short_cuts = self.xception_block(
                         data, chns[i], [1, 1, strides[i]], skip_conv=False)
                     s = s * stride
@@ -302,16 +302,15 @@ class Xception():
                 initializer=fluid.initializer.TruncatedNormal(
                     loc=0.0, scale=0.09))
             with scope('shortcut'):
-                skip = bn(
-                    conv(
-                        input,
-                        channels[-1],
-                        1,
-                        strides[-1],
-                        groups=1,
-                        padding=0,
-                        param_attr=param_attr),
-                    eps=1e-3)
+                skip = bn(conv(
+                    input,
+                    channels[-1],
+                    1,
+                    strides[-1],
+                    groups=1,
+                    padding=0,
+                    param_attr=param_attr),
+                          eps=1e-3)
         else:
             skip = input
         return data + skip, results
@@ -329,4 +328,4 @@ def xception_41(num_classes=None):
 
 def xception_71(num_classes=None):
     model = Xception(num_classes, 71)
-    return model
+    return model

+ 1 - 1
paddlex/cv/transforms/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 3 - 2
paddlex/cv/transforms/box_utils.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -170,7 +170,8 @@ def expand_segms(segms, x, y, height, width, ratio):
                                 0).astype(mask.dtype)
         expanded_mask[y:y + height, x:x + width] = mask
         rle = mask_util.encode(
-            np.array(expanded_mask, order='F', dtype=np.uint8))
+            np.array(
+                expanded_mask, order='F', dtype=np.uint8))
         return rle
 
     expanded_segms = []

+ 1 - 1
paddlex/cv/transforms/cls_transforms.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/transforms/det_transforms.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/cv/transforms/imgaug_support.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 4 - 3
paddlex/cv/transforms/ops.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -69,8 +69,8 @@ def random_crop(im,
                 (float(im.shape[1]) / im.shape[0]) / (w**2))
     scale_max = min(scale[1], bound)
     scale_min = min(scale[0], bound)
-    target_area = im.shape[0] * im.shape[1] * np.random.uniform(
-        scale_min, scale_max)
+    target_area = im.shape[0] * im.shape[1] * np.random.uniform(scale_min,
+                                                                scale_max)
     target_size = math.sqrt(target_area)
     w = int(target_size * w)
     h = int(target_size * h)
@@ -146,6 +146,7 @@ def brightness(im, brightness_lower, brightness_upper):
     im += delta
     return im
 
+
 def rotate(im, rotate_lower, rotate_upper):
     rotate_delta = np.random.uniform(rotate_lower, rotate_upper)
     im = im.rotate(int(rotate_delta))

+ 1 - 1
paddlex/cv/transforms/seg_transforms.py

@@ -1,5 +1,5 @@
 # coding: utf8
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 168 - 159
paddlex/cv/transforms/visualize.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -48,181 +48,192 @@ def _draw_rectangle_and_cname(img, xmin, ymin, xmax, ymax, cname, color):
         thickness=line_width)
     return img
 
+
 def cls_compose(im, label=None, transforms=None, vdl_writer=None, step=0):
-        """
+    """
         Args:
             im (str/np.ndarray): 图像路径/图像np.ndarray数据。
             label (int): 每张图像所对应的类别序号。
             vdl_writer (visualdl.LogWriter): VisualDL存储器,日志信息将保存在其中。
                 当为None时,不对日志进行保存。默认为None。
             step (int): 数据预处理的轮数,当vdl_writer不为None时有效。默认为0。
-            
+
         Returns:
             tuple: 根据网络所需字段所组成的tuple;
                 字段由transforms中的最后一个数据预处理操作决定。
         """
-        if isinstance(im, np.ndarray):
-            if len(im.shape) != 3:
+    if isinstance(im, np.ndarray):
+        if len(im.shape) != 3:
+            raise Exception(
+                "im should be 3-dimension, but now is {}-dimensions".format(
+                    len(im.shape)))
+    else:
+        try:
+            im = cv2.imread(im).astype('float32')
+        except:
+            raise TypeError('Can\'t read The image file {}!'.format(im))
+    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+    if vdl_writer is not None:
+        vdl_writer.add_image(
+            tag='0. OriginalImage/' + str(step), img=im, step=0)
+    op_id = 1
+    for op in transforms:
+        if isinstance(op, ClsTransform):
+            if vdl_writer is not None and hasattr(op, 'prob'):
+                op.prob = 1.0
+            outputs = op(im, label)
+            im = outputs[0]
+            if len(outputs) == 2:
+                label = outputs[1]
+            if isinstance(op, pdx.cv.transforms.cls_transforms.Normalize):
+                continue
+        else:
+            import imgaug.augmenters as iaa
+            if isinstance(op, iaa.Augmenter):
+                im = execute_imgaug(op, im)
+            outputs = (im, )
+            if label is not None:
+                outputs = (im, label)
+        if vdl_writer is not None:
+            tag = str(op_id) + '. ' + op.__class__.__name__ + '/' + str(step)
+            vdl_writer.add_image(tag=tag, img=im, step=0)
+        op_id += 1
+
+
+def det_compose(im,
+                im_info=None,
+                label_info=None,
+                transforms=None,
+                vdl_writer=None,
+                step=0,
+                labels=[],
+                catid2color=None):
+    def decode_image(im_file, im_info, label_info):
+        if im_info is None:
+            im_info = dict()
+        if isinstance(im_file, np.ndarray):
+            if len(im_file.shape) != 3:
                 raise Exception(
-                    "im should be 3-dimension, but now is {}-dimensions".
-                    format(len(im.shape)))
+                    "im should be 3-dimensions, but now is {}-dimensions".
+                    format(len(im_file.shape)))
+            im = im_file
         else:
             try:
-                im = cv2.imread(im).astype('float32')
+                im = cv2.imread(im_file).astype('float32')
             except:
-                raise TypeError('Can\'t read The image file {}!'.format(im))
+                raise TypeError('Can\'t read The image file {}!'.format(
+                    im_file))
         im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
-        if vdl_writer is not None:
-            vdl_writer.add_image(tag='0. OriginalImage/' +  str(step),
-                                 img=im,
-                                 step=0)
-        op_id = 1
-        for op in transforms:
-            if isinstance(op, ClsTransform):
-                if vdl_writer is not None and hasattr(op, 'prob'):
-                    op.prob = 1.0
-                outputs = op(im, label)
-                im = outputs[0]
-                if len(outputs) == 2:
-                    label = outputs[1]
-                if isinstance(op, pdx.cv.transforms.cls_transforms.Normalize):
-                    continue
+        # make default im_info with [h, w, 1]
+        im_info['im_resize_info'] = np.array(
+            [im.shape[0], im.shape[1], 1.], dtype=np.float32)
+        im_info['image_shape'] = np.array([im.shape[0],
+                                           im.shape[1]]).astype('int32')
+        use_mixup = False
+        for t in transforms:
+            if type(t).__name__ == 'MixupImage':
+                use_mixup = True
+            if not use_mixup:
+                if 'mixup' in im_info:
+                    del im_info['mixup']
+        # decode mixup image
+        if 'mixup' in im_info:
+            im_info['mixup'] = \
+              decode_image(im_info['mixup'][0],
+                           im_info['mixup'][1],
+                           im_info['mixup'][2])
+        if label_info is None:
+            return (im, im_info)
+        else:
+            return (im, im_info, label_info)
+
+    outputs = decode_image(im, im_info, label_info)
+    im = outputs[0]
+    im_info = outputs[1]
+    if len(outputs) == 3:
+        label_info = outputs[2]
+    if vdl_writer is not None:
+        vdl_writer.add_image(
+            tag='0. OriginalImage/' + str(step), img=im, step=0)
+    op_id = 1
+    bboxes = label_info['gt_bbox']
+    transforms = [None] + transforms
+    for op in transforms:
+        if im is None:
+            return None
+        if isinstance(op, DetTransform) or op is None:
+            if vdl_writer is not None and hasattr(op, 'prob'):
+                op.prob = 1.0
+            if op is not None:
+                outputs = op(im, im_info, label_info)
             else:
-                import imgaug.augmenters as iaa
-                if isinstance(op, iaa.Augmenter):
-                    im = execute_imgaug(op, im)
-                outputs = (im, )
-                if label is not None:
-                    outputs = (im, label)
+                outputs = (im, im_info, label_info)
+            im = outputs[0]
+            vdl_im = im
             if vdl_writer is not None:
-                tag = str(op_id) + '. ' + op.__class__.__name__ + '/' +  str(step)
-                vdl_writer.add_image(tag=tag,
-                                     img=im,
-                                     step=0)
-            op_id += 1
-            
-def det_compose(im, im_info=None, label_info=None, transforms=None, vdl_writer=None, step=0,
-                labels=[], catid2color=None):
-        def decode_image(im_file, im_info, label_info):
-            if im_info is None:
-                im_info = dict()
-            if isinstance(im_file, np.ndarray):
-                if len(im_file.shape) != 3:
-                    raise Exception(
-                        "im should be 3-dimensions, but now is {}-dimensions".
-                        format(len(im_file.shape)))
-                im = im_file
-            else:
-                try:
-                    im = cv2.imread(im_file).astype('float32')
-                except:
-                    raise TypeError('Can\'t read The image file {}!'.format(
-                        im_file))
-            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
-            # make default im_info with [h, w, 1]
-            im_info['im_resize_info'] = np.array(
-                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
-            im_info['image_shape'] = np.array([im.shape[0],
-                                               im.shape[1]]).astype('int32')
-            use_mixup = False
-            for t in transforms:
-                if type(t).__name__ == 'MixupImage':
-                    use_mixup = True
-                if not use_mixup:
-                    if 'mixup' in im_info:
-                        del im_info['mixup']
-            # decode mixup image
-            if 'mixup' in im_info:
-                im_info['mixup'] = \
-                  decode_image(im_info['mixup'][0],
-                               im_info['mixup'][1],
-                               im_info['mixup'][2])
-            if label_info is None:
-                return (im, im_info)
-            else:
-                return (im, im_info, label_info)
-            
-        outputs = decode_image(im, im_info, label_info)
-        im = outputs[0]
-        im_info = outputs[1]
-        if len(outputs) == 3:
-            label_info = outputs[2]
-        if vdl_writer is not None:
-            vdl_writer.add_image(tag='0. OriginalImage/' +  str(step),
-                                 img=im,
-                                 step=0)
-        op_id = 1
-        bboxes = label_info['gt_bbox']
-        transforms = [None] + transforms
-        for op in transforms:
-            if im is None:
-                return None
-            if isinstance(op, DetTransform) or op is None:
-                if vdl_writer is not None and hasattr(op, 'prob'):
-                    op.prob = 1.0
-                if op is not None:
-                    outputs = op(im, im_info, label_info)
-                else:
-                    outputs = (im, im_info, label_info)
-                im = outputs[0]
-                vdl_im = im
-                if vdl_writer is not None:
-                    if isinstance(op, pdx.cv.transforms.det_transforms.ResizeByShort):
-                        scale = outputs[1]['im_resize_info'][2]
-                        bboxes = bboxes * scale
-                    elif isinstance(op, pdx.cv.transforms.det_transforms.Resize):
-                        h = outputs[1]['image_shape'][0]
-                        w = outputs[1]['image_shape'][1]
-                        target_size = op.target_size
-                        if isinstance(target_size, int):
-                            h_scale = float(target_size) / h
-                            w_scale = float(target_size) / w
-                        else:
-                            h_scale = float(target_size[0]) / h
-                            w_scale = float(target_size[1]) / w
-                        bboxes[:,0] = bboxes[:,0] * w_scale
-                        bboxes[:,1] = bboxes[:,1] * h_scale
-                        bboxes[:,2] = bboxes[:,2] * w_scale
-                        bboxes[:,3] = bboxes[:,3] * h_scale
+                if isinstance(op,
+                              pdx.cv.transforms.det_transforms.ResizeByShort):
+                    scale = outputs[1]['im_resize_info'][2]
+                    bboxes = bboxes * scale
+                elif isinstance(op, pdx.cv.transforms.det_transforms.Resize):
+                    h = outputs[1]['image_shape'][0]
+                    w = outputs[1]['image_shape'][1]
+                    target_size = op.target_size
+                    if isinstance(target_size, int):
+                        h_scale = float(target_size) / h
+                        w_scale = float(target_size) / w
                     else:
-                        bboxes = outputs[2]['gt_bbox']
-                    if not isinstance(op, pdx.cv.transforms.det_transforms.RandomHorizontalFlip):
-                        for i in range(bboxes.shape[0]):
-                            bbox = bboxes[i]
-                            cname = labels[outputs[2]['gt_class'][i][0]-1]
-                            vdl_im = _draw_rectangle_and_cname(vdl_im, 
-                                                               int(bbox[0]), 
-                                                               int(bbox[1]), 
-                                                               int(bbox[2]), 
-                                                               int(bbox[3]), 
-                                                               cname, 
-                                                               catid2color[outputs[2]['gt_class'][i][0]-1])
-                    if isinstance(op, pdx.cv.transforms.det_transforms.Normalize):
-                        continue
-            else:
-                im = execute_imgaug(op, im)
-                if label_info is not None:
-                    outputs = (im, im_info, label_info)
+                        h_scale = float(target_size[0]) / h
+                        w_scale = float(target_size[1]) / w
+                    bboxes[:, 0] = bboxes[:, 0] * w_scale
+                    bboxes[:, 1] = bboxes[:, 1] * h_scale
+                    bboxes[:, 2] = bboxes[:, 2] * w_scale
+                    bboxes[:, 3] = bboxes[:, 3] * h_scale
                 else:
-                    outputs = (im, im_info)
-                vdl_im = im
-            if vdl_writer is not None:
-                tag = str(op_id) + '. ' + op.__class__.__name__ + '/' +  str(step)
-                if op is None:
-                    tag = str(op_id) + '. OriginalImageWithGTBox/' +  str(step)
-                vdl_writer.add_image(tag=tag,
-                                     img=vdl_im,
-                                     step=0)
-            op_id += 1
-            
-def seg_compose(im, im_info=None, label=None, transforms=None, vdl_writer=None, step=0):
+                    bboxes = outputs[2]['gt_bbox']
+                if not isinstance(
+                        op,
+                        pdx.cv.transforms.det_transforms.RandomHorizontalFlip):
+                    for i in range(bboxes.shape[0]):
+                        bbox = bboxes[i]
+                        cname = labels[outputs[2]['gt_class'][i][0] - 1]
+                        vdl_im = _draw_rectangle_and_cname(
+                            vdl_im,
+                            int(bbox[0]),
+                            int(bbox[1]),
+                            int(bbox[2]),
+                            int(bbox[3]), cname,
+                            catid2color[outputs[2]['gt_class'][i][0] - 1])
+                if isinstance(op, pdx.cv.transforms.det_transforms.Normalize):
+                    continue
+        else:
+            im = execute_imgaug(op, im)
+            if label_info is not None:
+                outputs = (im, im_info, label_info)
+            else:
+                outputs = (im, im_info)
+            vdl_im = im
+        if vdl_writer is not None:
+            tag = str(op_id) + '. ' + op.__class__.__name__ + '/' + str(step)
+            if op is None:
+                tag = str(op_id) + '. OriginalImageWithGTBox/' + str(step)
+            vdl_writer.add_image(tag=tag, img=vdl_im, step=0)
+        op_id += 1
+
+
+def seg_compose(im,
+                im_info=None,
+                label=None,
+                transforms=None,
+                vdl_writer=None,
+                step=0):
     if im_info is None:
         im_info = list()
     if isinstance(im, np.ndarray):
         if len(im.shape) != 3:
             raise Exception(
-                "im should be 3-dimensions, but now is {}-dimensions".
-                format(len(im.shape)))
+                "im should be 3-dimensions, but now is {}-dimensions".format(
+                    len(im.shape)))
     else:
         try:
             im = cv2.imread(im).astype('float32')
@@ -233,9 +244,8 @@ def seg_compose(im, im_info=None, label=None, transforms=None, vdl_writer=None,
         if not isinstance(label, np.ndarray):
             label = np.asarray(Image.open(label))
     if vdl_writer is not None:
-        vdl_writer.add_image(tag='0. OriginalImage' + '/' +  str(step),
-                             img=im,
-                             step=0)
+        vdl_writer.add_image(
+            tag='0. OriginalImage' + '/' + str(step), img=im, step=0)
     op_id = 1
     for op in transforms:
         if isinstance(op, SegTransform):
@@ -254,19 +264,18 @@ def seg_compose(im, im_info=None, label=None, transforms=None, vdl_writer=None,
             else:
                 outputs = (im, im_info)
         if vdl_writer is not None:
-            tag = str(op_id) + '. ' + op.__class__.__name__  + '/' +  str(step)
-            vdl_writer.add_image(tag=tag,
-                                 img=im,
-                                 step=0)
+            tag = str(op_id) + '. ' + op.__class__.__name__ + '/' + str(step)
+            vdl_writer.add_image(tag=tag, img=im, step=0)
         op_id += 1
 
+
 def visualize(dataset, img_count=3, save_dir='vdl_output'):
    '''Visualize the intermediate results of data preprocessing/augmentation.
    The intermediate results can be inspected with VisualDL:
    1. Start VisualDL with: visualdl --logdir vdl_output --port 8001
    2. Open https://0.0.0.0:8001 in a browser,
        where 0.0.0.0 means local access; for a remote service, replace it with that machine's IP
-    
+
     Args:
        dataset (paddlex.datasets): dataset reader.
        img_count (int): number of images to run preprocessing/augmentation on. Defaults to 3.
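Note: since this file's diff touches the user-facing visualization entry point, a usage sketch of the documented VisualDL workflow follows; the dataset layout and transform list are hypothetical (patterned on the tutorials elsewhere in this commit), only the visualize signature and the visualdl command come from the code above.

import paddlex as pdx
from paddlex.cls import transforms
from paddlex.cv.transforms.visualize import visualize

# Hypothetical classification dataset; any paddlex.datasets reader should work.
train_transforms = transforms.Compose([
    transforms.RandomCrop(crop_size=224),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])
train_dataset = pdx.datasets.ImageNet(
    data_dir='my_dataset',
    file_list='my_dataset/train_list.txt',
    label_list='my_dataset/labels.txt',
    transforms=train_transforms)

# Write the intermediate images of 3 samples to vdl_output/, then inspect
# them with: visualdl --logdir vdl_output --port 8001
visualize(train_dataset, img_count=3, save_dir='vdl_output')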

+ 1 - 1
paddlex/deploy.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/det.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/interpret/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/interpret/as_data_reader/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 5 - 4
paddlex/interpret/as_data_reader/data_path_utils.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -14,6 +14,7 @@
 
 import os
 
+
 def _find_classes(dir):
     # Faster and available in Python 3.5 and above
     classes = [d.name for d in os.scandir(dir) if d.is_dir()]

+ 13 - 8
paddlex/interpret/as_data_reader/readers.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -138,8 +138,10 @@ class ReaderConfig(object):
         ...
 
     """
+
     def __init__(self, dataset_dir, is_test):
-        image_paths, labels, self.num_classes = self.get_dataset_info(dataset_dir, is_test)
+        image_paths, labels, self.num_classes = self.get_dataset_info(
+            dataset_dir, is_test)
         random_per = np.random.permutation(range(len(image_paths)))
         self.image_paths = image_paths[random_per]
         self.labels = labels[random_per]
@@ -147,7 +149,8 @@ class ReaderConfig(object):
 
     def get_reader(self):
         def reader():
-            IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
+            IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm',
+                              '.tif', '.tiff', '.webp')
             target_size = 256
             crop_size = 224
 
@@ -171,7 +174,8 @@ class ReaderConfig(object):
         return reader
 
     def get_dataset_info(self, dataset_dir, is_test=False):
-        IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
+        IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm',
+                          '.tif', '.tiff', '.webp')
 
         # read
         if is_test:
@@ -199,7 +203,8 @@ class ReaderConfig(object):
 
 def create_reader(list_image_path, list_label=None, is_test=False):
     def reader():
-        IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
+        IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm',
+                          '.tif', '.tiff', '.webp')
         target_size = 256
         crop_size = 224
 

+ 1 - 1
paddlex/interpret/core/__init__.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 4 - 4
paddlex/interpret/core/_session_preparation.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 4 - 4
paddlex/interpret/core/interpretation.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 4 - 4
paddlex/interpret/core/interpretation_algorithms.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 4 - 4
paddlex/interpret/core/normlime_base.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 1 - 1
paddlex/interpret/interpretation_predict.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/interpret/visualize.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/seg.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 1 - 1
paddlex/slim.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 4 - 4
paddlex/utils/__init__.py

@@ -1,11 +1,11 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 4 - 3
paddlex/utils/logging.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -29,8 +29,9 @@ def log(level=2, message="", use_color=False):
     current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
     if paddlex.log_level >= level:
         if use_color:
-            print("\033[1;31;40m{} [{}]\t{}\033[0m".format(current_time, levels[
-                level], message).encode("utf-8").decode("latin1"))
+            print("\033[1;31;40m{} [{}]\t{}\033[0m".format(
+                current_time, levels[level], message).encode("utf-8").decode(
+                    "latin1"))
         else:
             print("{} [{}]\t{}".format(current_time, levels[level], message)
                   .encode("utf-8").decode("latin1"))
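Note: the reformatted print call builds a timestamped, level-tagged line, optionally wrapped in an ANSI color escape; a small sketch of the same formatting, with a hypothetical level table (the real mapping lives elsewhere in logging.py):

import time

levels = {0: 'ERROR', 1: 'WARNING', 2: 'INFO', 3: 'DEBUG'}  # hypothetical mapping
level, message, use_color = 2, "Model saved.", True

current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
if use_color:
    # \033[1;31;40m ... \033[0m renders the whole line in bold red on black.
    print("\033[1;31;40m{} [{}]\t{}\033[0m".format(current_time, levels[level], message))
else:
    print("{} [{}]\t{}".format(current_time, levels[level], message))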

+ 3 - 3
paddlex/utils/save.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -531,8 +531,8 @@ def save_mask_inference_model(dirname,
     if isinstance(target_vars, Variable):
         target_vars = [target_vars]
     elif export_for_deployment:
-        if not (bool(target_vars)
-                and all(isinstance(var, Variable) for var in target_vars)):
+        if not (bool(target_vars) and
+                all(isinstance(var, Variable) for var in target_vars)):
             raise ValueError("'target_vars' should be a list of Variable.")
 
     main_program = _get_valid_program(main_program)

+ 3 - 1
paddlex/utils/utils.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -50,6 +50,7 @@ def get_environ_info():
                 info['num'] = fluid.core.get_cuda_device_count()
     return info
 
+
 def path_normalization(path):
     win_sep = "\\"
     other_sep = "/"
@@ -59,6 +60,7 @@ def path_normalization(path):
         path = other_sep.join(path.split(win_sep))
     return path
 
+
 def parse_param_file(param_file, return_shape=True):
     from paddle.fluid.proto.framework_pb2 import VarType
     f = open(param_file, 'rb')
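Note: path_normalization, which gains a separating blank line above, simply rewrites Windows separators into "/"; a one-off sketch with a hypothetical path:

win_sep, other_sep = "\\", "/"
path = "my_dataset\\JPEGImages\\0001.jpg"  # hypothetical Windows-style path
if win_sep in path:
    path = other_sep.join(path.split(win_sep))
print(path)  # my_dataset/JPEGImages/0001.jpg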

+ 1 - 1
setup.py

@@ -1,4 +1,4 @@
-# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

+ 4 - 4
tutorials/compress/classification/cal_sensitivities_file.py

@@ -1,11 +1,11 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

+ 6 - 8
tutorials/compress/classification/mobilenetv2.py

@@ -1,11 +1,11 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
-# 
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-# 
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-# 
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -29,13 +29,11 @@ def train(model_dir=None, sensitivities_file=None, eval_metric_loss=0.05):
     # Define the transforms used during training and evaluation
     train_transforms = transforms.Compose([
         transforms.RandomCrop(crop_size=224),
-        transforms.RandomHorizontalFlip(),
-        transforms.Normalize()
+        transforms.RandomHorizontalFlip(), transforms.Normalize()
     ])
     eval_transforms = transforms.Compose([
         transforms.ResizeByShort(short_size=256),
-        transforms.CenterCrop(crop_size=224),
-        transforms.Normalize()
+        transforms.CenterCrop(crop_size=224), transforms.Normalize()
     ])
 
     # Define the datasets used for training and evaluation

+ 1 - 1
tutorials/compress/detection/cal_sensitivities_file.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.

+ 7 - 10
tutorials/compress/detection/yolov3_mobilenet.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
@@ -28,17 +28,14 @@ def train(model_dir, sensitivities_file, eval_metric_loss):
 
     # Define the transforms used during training and evaluation
     train_transforms = transforms.Compose([
-        transforms.MixupImage(mixup_epoch=250),
-        transforms.RandomDistort(),
-        transforms.RandomExpand(),
-        transforms.RandomCrop(),
-        transforms.Resize(target_size=608, interp='RANDOM'),
-        transforms.RandomHorizontalFlip(),
-        transforms.Normalize()
+        transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(),
+        transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize(
+            target_size=608, interp='RANDOM'),
+        transforms.RandomHorizontalFlip(), transforms.Normalize()
     ])
     eval_transforms = transforms.Compose([
-        transforms.Resize(target_size=608, interp='CUBIC'),
-        transforms.Normalize()
+        transforms.Resize(
+            target_size=608, interp='CUBIC'), transforms.Normalize()
     ])
 
     # Define the datasets used for training and evaluation

+ 1 - 1
tutorials/compress/segmentation/cal_sensitivities_file.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.

+ 4 - 7
tutorials/compress/segmentation/unet.py

@@ -1,4 +1,4 @@
-#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
+#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
 #
 #Licensed under the Apache License, Version 2.0 (the "License");
 #you may not use this file except in compliance with the License.
@@ -28,15 +28,12 @@ def train(model_dir, sensitivities_file, eval_metric_loss):
 
     # Define the transforms used during training and evaluation
     train_transforms = transforms.Compose([
-        transforms.RandomHorizontalFlip(),
-        transforms.ResizeRangeScaling(),
-        transforms.RandomPaddingCrop(crop_size=512),
-        transforms.Normalize()
+        transforms.RandomHorizontalFlip(), transforms.ResizeRangeScaling(),
+        transforms.RandomPaddingCrop(crop_size=512), transforms.Normalize()
     ])
     eval_transforms = transforms.Compose([
         transforms.ResizeByLong(long_size=512),
-        transforms.Padding(target_size=512),
-        transforms.Normalize()
+        transforms.Padding(target_size=512), transforms.Normalize()
     ])
 
     # 定义训练和验证所用的数据集