
add paddlex.png

FlyingQianMM, 4 years ago
parent
commit
52a214ddac
100 files changed, 1201 insertions(+), 973 deletions(-)
  1. deploy/cpp/model_deploy/common/src/paddle_deploy.cpp (+1 -1)
  2. docs/gui/images/QR2.jpg (binary)
  3. docs/gui/images/paddlex.png (binary)
  4. examples/C#_deploy/model_infer.cpp (+8 -8)
  5. paddlex/cv/models/utils/det_metrics/coco_utils.py (+1 -1)
  6. paddlex/paddleseg/core/train.py (+8 -8)
  7. paddlex/paddleseg/core/val.py (+5 -7)
  8. paddlex/paddleseg/cvlibs/config.py (+9 -10)
  9. paddlex/paddleseg/cvlibs/manager.py (+4 -5)
  10. paddlex/paddleseg/datasets/cityscapes.py (+2 -3)
  11. paddlex/paddleseg/datasets/cocostuff.py (+2 -3)
  12. paddlex/paddleseg/datasets/optic_disc_seg.py (+2 -2)
  13. paddlex/paddleseg/datasets/voc.py (+2 -1)
  14. paddlex/paddleseg/models/ann.py (+3 -1)
  15. paddlex/paddleseg/models/attention_unet.py (+22 -11)
  16. paddlex/paddleseg/models/backbones/hrnet.py (+2 -1)
  17. paddlex/paddleseg/models/backbones/mobilenetv3.py (+2 -1)
  18. paddlex/paddleseg/models/backbones/resnet_vd.py (+12 -9)
  19. paddlex/paddleseg/models/bisenet.py (+19 -11)
  20. paddlex/paddleseg/models/danet.py (+5 -3)
  21. paddlex/paddleseg/models/decoupled_segnet.py (+13 -6)
  22. paddlex/paddleseg/models/deeplab.py (+2 -1)
  23. paddlex/paddleseg/models/dnlnet.py (+9 -4)
  24. paddlex/paddleseg/models/emanet.py (+10 -6)
  25. paddlex/paddleseg/models/fast_scnn.py (+3 -1)
  26. paddlex/paddleseg/models/gcnet.py (+2 -1)
  27. paddlex/paddleseg/models/gscnn.py (+7 -3)
  28. paddlex/paddleseg/models/hardnet.py (+2 -1)
  29. paddlex/paddleseg/models/isanet.py (+17 -12)
  30. paddlex/paddleseg/models/layers/activation.py (+2 -2)
  31. paddlex/paddleseg/models/layers/layer_libs.py (+2 -1)
  32. paddlex/paddleseg/models/layers/nonlocal2d.py (+2 -1)
  33. paddlex/paddleseg/models/losses/cross_entropy_loss.py (+2 -1)
  34. paddlex/paddleseg/models/losses/lovasz_loss.py (+4 -4)
  35. paddlex/paddleseg/models/ocrnet.py (+5 -3)
  36. paddlex/paddleseg/models/pspnet.py (+2 -2)
  37. paddlex/paddleseg/models/sfnet.py (+10 -6)
  38. paddlex/paddleseg/models/shufflenet_slim.py (+26 -8)
  39. paddlex/paddleseg/models/unet_3plus.py (+22 -13)
  40. paddlex/paddleseg/models/unet_plusplus.py (+4 -3)
  41. paddlex/paddleseg/transforms/functional.py (+5 -4)
  42. paddlex/paddleseg/transforms/transforms.py (+13 -10)
  43. paddlex/paddleseg/utils/config_check.py (+2 -1)
  44. paddlex/paddleseg/utils/download.py (+4 -4)
  45. paddlex/paddleseg/utils/logger.py (+2 -3)
  46. paddlex/paddleseg/utils/metrics.py (+2 -2)
  47. paddlex/paddleseg/utils/progbar.py (+18 -15)
  48. paddlex/paddleseg/utils/utils.py (+11 -11)
  49. paddlex/ppcls/data/imaug/operators.py (+18 -6)
  50. paddlex/ppcls/modeling/architectures/inception_v3.py (+339 -279)
  51. paddlex/ppcls/modeling/architectures/resnet.py (+6 -5)
  52. paddlex/ppcls/modeling/architectures/resnext.py (+8 -2)
  53. paddlex/ppcls/modeling/loss.py (+12 -8)
  54. paddlex/ppcls/utils/check.py (+1 -1)
  55. paddlex/ppcls/utils/logger.py (+1 -1)
  56. paddlex/ppcls/utils/metrics.py (+8 -3)
  57. paddlex/ppdet/core/workspace.py (+4 -1)
  58. paddlex/ppdet/data/__init__.py (+12 -12)
  59. paddlex/ppdet/data/reader.py (+8 -8)
  60. paddlex/ppdet/data/source/category.py (+12 -12)
  61. paddlex/ppdet/data/source/coco.py (+24 -22)
  62. paddlex/ppdet/data/source/dataset.py (+12 -12)
  63. paddlex/ppdet/data/source/keypoint_coco.py (+12 -12)
  64. paddlex/ppdet/data/source/mot.py (+5 -5)
  65. paddlex/ppdet/data/source/voc.py (+2 -2)
  66. paddlex/ppdet/data/transform/autoaugment_utils.py (+24 -19)
  67. paddlex/ppdet/data/transform/batch_operators.py (+27 -17)
  68. paddlex/ppdet/data/transform/gridmask_utils.py (+2 -2)
  69. paddlex/ppdet/data/transform/keypoint_operators.py (+2 -1)
  70. paddlex/ppdet/data/transform/mot_operators.py (+9 -8)
  71. paddlex/ppdet/data/transform/op_helper.py (+6 -2)
  72. paddlex/ppdet/data/transform/operators.py (+29 -21)
  73. paddlex/ppdet/engine/__init__.py (+12 -12)
  74. paddlex/ppdet/engine/callbacks.py (+20 -17)
  75. paddlex/ppdet/engine/env.py (+12 -12)
  76. paddlex/ppdet/engine/export_utils.py (+14 -13)
  77. paddlex/ppdet/metrics/coco_utils.py (+12 -12)
  78. paddlex/ppdet/metrics/json_results.py (+2 -2)
  79. paddlex/ppdet/metrics/keypoint_metrics.py (+16 -16)
  80. paddlex/ppdet/metrics/map_utils.py (+7 -6)
  81. paddlex/ppdet/metrics/metrics.py (+16 -16)
  82. paddlex/ppdet/metrics/mot_eval_utils.py (+2 -1)
  83. paddlex/ppdet/metrics/mot_metrics.py (+12 -12)
  84. paddlex/ppdet/metrics/widerface_utils.py (+8 -6)
  85. paddlex/ppdet/model_zoo/__init__.py (+12 -12)
  86. paddlex/ppdet/model_zoo/model_zoo.py (+12 -12)
  87. paddlex/ppdet/model_zoo/tests/test_get_model.py (+0 -1)
  88. paddlex/ppdet/modeling/__init__.py (+12 -12)
  89. paddlex/ppdet/modeling/architectures/__init__.py (+7 -7)
  90. paddlex/ppdet/modeling/architectures/cascade_rcnn.py (+14 -13)
  91. paddlex/ppdet/modeling/architectures/centernet.py (+12 -12)
  92. paddlex/ppdet/modeling/architectures/deepsort.py (+12 -12)
  93. paddlex/ppdet/modeling/architectures/fairmot.py (+12 -12)
  94. paddlex/ppdet/modeling/architectures/faster_rcnn.py (+12 -12)
  95. paddlex/ppdet/modeling/architectures/fcos.py (+12 -12)
  96. paddlex/ppdet/modeling/architectures/jde.py (+15 -14)
  97. paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py (+14 -14)
  98. paddlex/ppdet/modeling/architectures/keypoint_hrnet.py (+4 -4)
  99. paddlex/ppdet/modeling/architectures/mask_rcnn.py (+12 -12)
  100. paddlex/ppdet/modeling/architectures/s2anet.py (+11 -11)

+ 1 - 1
deploy/cpp/model_deploy/common/src/paddle_deploy.cpp

@@ -19,4 +19,4 @@ Model* CreateModel(const std::string& name) {
 	return PaddleDeploy::ModelFactory::CreateObject(name);
 }

-}  // namespace PaddleDeploy
+}  // namespace PaddleDeploy

Binary
docs/gui/images/QR2.jpg


Binary
docs/gui/images/paddlex.png


+ 8 - 8
examples/C#_deploy/model_infer.cpp

@@ -41,21 +41,21 @@ extern "C" __declspec(dllexport) void InitModel(const char* model_type, const ch
 	if (init)
 	{
 		std::cout << "init model success" << std::endl;
-	}	
+	}
 }
 /*
 * img: input for predicting.
-* 
+*
 * nWidth: width of img.
-* 
+*
 * nHeight: height of img.
-* 
+*
 * nChannel: channel of img.
-* 
+*
 * output: result of predict, include category_id, score, coordinate.
-* 
+*
 * nBoxesNum: number of box
-* 
+*
 * LabelList: label list of result
 */
 extern "C" __declspec(dllexport) void ModelPredict(const unsigned char* img, int nWidth, int nHeight,int nChannel, float* output, int* nBoxesNum, char* LabelList)
@@ -110,7 +110,7 @@ extern "C" __declspec(dllexport) void ModelPredict(const unsigned char* img, int
 			output[num * 6 + 2] = results[num].det_result->boxes[i].coordinate[0];
 			output[num * 6 + 3] = results[num].det_result->boxes[i].coordinate[1];
 			output[num * 6 + 4] = results[num].det_result->boxes[i].coordinate[2];
-			output[num * 6 + 5] = results[num].det_result->boxes[i].coordinate[3];						
+			output[num * 6 + 5] = results[num].det_result->boxes[i].coordinate[3];
 		}
 	}
 	memcpy(LabelList, label.c_str(), strlen(label.c_str()));

+ 1 - 1
paddlex/cv/models/utils/det_metrics/coco_utils.py

@@ -131,7 +131,7 @@ def cocoapi_eval(anns,
         results_flatten = list(itertools.chain(*results_per_category))
         headers = ['category', 'AP'] * (num_columns // 2)
         results_2d = itertools.zip_longest(
-            *[results_flatten[i::num_columns] for i in range(num_columns)])
+            * [results_flatten[i::num_columns] for i in range(num_columns)])
         table_data = [headers]
         table_data += [result for result in results_2d]
         table = AsciiTable(table_data)
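As an aside, the slicing-plus-zip_longest idiom in this hunk can be puzzling at first read. A minimal standalone sketch (illustrative values, not PaddleX data) of how it turns the flat per-category results into table rows:

import itertools

# Sketch of the table-building idiom above, with made-up values.
# The flat list alternates category and AP; slicing with a stride of
# num_columns yields one column per start index, and zip_longest stitches
# the columns back into rows, padding ragged columns with None.
results_flatten = ['cat', 0.51, 'dog', 0.47, 'bird', 0.62]
num_columns = 4  # i.e. two (category, AP) pairs per printed row

columns = [results_flatten[i::num_columns] for i in range(num_columns)]
rows = list(itertools.zip_longest(*columns))
print(rows)  # [('cat', 0.51, 'dog', 0.47), ('bird', 0.62, None, None)]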

+ 8 - 8
paddlex/paddleseg/core/train.py

@@ -110,8 +110,7 @@ def train(model,
         train_dataset,
         batch_sampler=batch_sampler,
         num_workers=num_workers,
-        return_list=True,
-    )
+        return_list=True, )
 
     if use_vdl:
         from visualdl import LogWriter
@@ -176,9 +175,9 @@ def train(model,
                 eta = calculate_eta(remain_iters, avg_train_batch_cost)
                 logger.info(
                     "[TRAIN] epoch: {}, iter: {}/{}, loss: {:.4f}, lr: {:.6f}, batch_cost: {:.4f}, reader_cost: {:.5f}, ips: {:.4f} samples/sec | ETA {}"
-                    .format((iter - 1) // iters_per_epoch + 1, iter, iters,
-                            avg_loss, lr, avg_train_batch_cost,
-                            avg_train_reader_cost,
+                    .format((iter - 1
+                             ) // iters_per_epoch + 1, iter, iters, avg_loss,
+                            lr, avg_train_batch_cost, avg_train_reader_cost,
                             batch_cost_averager.get_ips_average(), eta))
                 if use_vdl:
                     log_writer.add_scalar('Train/loss', avg_loss, iter)
@@ -201,14 +200,15 @@ def train(model,
                 reader_cost_averager.reset()
                 batch_cost_averager.reset()
 
-            if (iter % save_interval == 0
-                    or iter == iters) and (val_dataset is not None):
+            if (iter % save_interval == 0 or
+                    iter == iters) and (val_dataset is not None):
                 num_workers = 1 if num_workers > 0 else 0
                 mean_iou, acc, _, _, _ = evaluate(
                     model, val_dataset, num_workers=num_workers)
                 model.train()
 
-            if (iter % save_interval == 0 or iter == iters) and local_rank == 0:
+            if (iter % save_interval == 0 or
+                    iter == iters) and local_rank == 0:
                 current_save_dir = os.path.join(save_dir,
                                                 "iter_{}".format(iter))
                 if not os.path.isdir(current_save_dir):

+ 5 - 7
paddlex/paddleseg/core/val.py

@@ -72,8 +72,7 @@ def evaluate(model,
         eval_dataset,
         batch_sampler=batch_sampler,
         num_workers=num_workers,
-        return_list=True,
-    )
+        return_list=True, )
 
     total_iters = len(loader)
     intersect_area_all = 0
@@ -81,9 +80,8 @@ def evaluate(model,
     label_area_all = 0
 
     if print_detail:
-        logger.info(
-            "Start evaluating (total_samples: {}, total_iters: {})...".format(
-                len(eval_dataset), total_iters))
+        logger.info("Start evaluating (total_samples: {}, total_iters: {})...".
+                    format(len(eval_dataset), total_iters))
     progbar_val = progbar.Progbar(target=total_iters, verbose=1)
     reader_cost_averager = TimeAverager()
     batch_cost_averager = TimeAverager()
@@ -167,8 +165,8 @@ def evaluate(model,
 
     if print_detail:
         logger.info(
-            "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} ".format(
-                len(eval_dataset), miou, acc, kappa))
+            "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} ".
+            format(len(eval_dataset), miou, acc, kappa))
         logger.info("[EVAL] Class IoU: \n" + str(np.round(class_iou, 4)))
         logger.info("[EVAL] Class Acc: \n" + str(np.round(class_acc, 4)))
     return miou, acc, class_iou, class_acc, kappa
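For reference, the mIoU and accuracy logged above follow directly from the accumulated per-class areas. A minimal numpy sketch, assuming (as the variable names suggest) that evaluate() sums intersection, prediction, and label pixel counts per class over the whole dataset:

import numpy as np

# Hypothetical per-class pixel counts accumulated over the dataset.
intersect_area = np.array([900.0, 40.0])  # |pred ∩ label| per class
pred_area = np.array([1000.0, 60.0])      # |pred| per class
label_area = np.array([1100.0, 50.0])     # |label| per class

union = pred_area + label_area - intersect_area
class_iou = intersect_area / union            # per-class IoU
miou = class_iou.mean()                       # the logged mIoU
acc = intersect_area.sum() / pred_area.sum()  # overall pixel accuracy
print(np.round(class_iou, 4), round(miou, 4), round(acc, 4))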

+ 9 - 10
paddlex/paddleseg/cvlibs/config.py

@@ -67,9 +67,9 @@ class Config(object):
 
     def __init__(self,
                  path: str,
-                 learning_rate: float = None,
-                 batch_size: int = None,
-                 iters: int = None):
+                 learning_rate: float=None,
+                 batch_size: int=None,
+                 iters: int=None):
         if not path:
             raise ValueError('Please specify the configuration file path.')
 
@@ -113,9 +113,9 @@ class Config(object):
         return dic
 
     def update(self,
-               learning_rate: float = None,
-               batch_size: int = None,
-               iters: int = None):
+               learning_rate: float=None,
+               batch_size: int=None,
+               iters: int=None):
         '''Update config'''
         if learning_rate:
             if 'lr_scheduler' in self.dic:
@@ -207,10 +207,9 @@ class Config(object):
 
     @property
     def decay_args(self) -> dict:
-        args = self.dic.get('learning_rate', {}).get('decay', {
-            'type': 'poly',
-            'power': 0.9
-        }).copy()
+        args = self.dic.get('learning_rate', {}).get(
+            'decay', {'type': 'poly',
+                      'power': 0.9}).copy()
 
         if args['type'] == 'poly':
             args.setdefault('decay_steps', self.iters)

+ 4 - 5
paddlex/paddleseg/cvlibs/manager.py

@@ -74,8 +74,8 @@ class ComponentManager:
 
     def __getitem__(self, item):
         if item not in self._components_dict.keys():
-            raise KeyError("{} does not exist in availabel {}".format(
-                item, self))
+            raise KeyError("{} does not exist in availabel {}".format(item,
+                                                                      self))
         return self._components_dict[item]
 
     @property
@@ -100,9 +100,8 @@ class ComponentManager:
 
         # Currently only support class or function type
         if not (inspect.isclass(component) or inspect.isfunction(component)):
-            raise TypeError(
-                "Expect class/function type, but received {}".format(
-                    type(component)))
+            raise TypeError("Expect class/function type, but received {}".
+                            format(type(component)))
 
         # Obtain the internal name of the component
         component_name = component.__name__

+ 2 - 3
paddlex/paddleseg/datasets/cityscapes.py

@@ -82,6 +82,5 @@ class Cityscapes(Dataset):
         img_files = sorted(
             glob.glob(os.path.join(img_dir, mode, '*', '*_leftImg8bit.png')))
 
-        self.file_list = [[
-            img_path, label_path
-        ] for img_path, label_path in zip(img_files, label_files)]
+        self.file_list = [[img_path, label_path] for img_path, label_path in
+                          zip(img_files, label_files)]

+ 2 - 3
paddlex/paddleseg/datasets/cocostuff.py

@@ -75,6 +75,5 @@ class CocoStuff(Dataset):
         img_files = sorted(
             glob.glob(os.path.join(img_dir, mode + '2017', '*.jpg')))
 
-        self.file_list = [[
-            img_path, label_path
-        ] for img_path, label_path in zip(img_files, label_files)]
+        self.file_list = [[img_path, label_path] for img_path, label_path in
+                          zip(img_files, label_files)]

+ 2 - 2
paddlex/paddleseg/datasets/optic_disc_seg.py

@@ -53,8 +53,8 @@ class OpticDiscSeg(Dataset):
 
         if mode not in ['train', 'val', 'test']:
             raise ValueError(
-                "`mode` should be 'train', 'val' or 'test', but got {}.".format(
-                    mode))
+                "`mode` should be 'train', 'val' or 'test', but got {}.".
+                format(mode))
 
         if self.transforms is None:
             raise ValueError("`transforms` is necessary, but it is None.")

+ 2 - 1
paddlex/paddleseg/datasets/voc.py

@@ -38,7 +38,8 @@ class PascalVOC(Dataset):
     """
     """
     NUM_CLASSES = 21
     NUM_CLASSES = 21
 
 
-    def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
+    def __init__(self, transforms, dataset_root=None, mode='train',
+                 edge=False):
         self.dataset_root = dataset_root
         self.dataset_root = dataset_root
         self.transforms = Compose(transforms)
         self.transforms = Compose(transforms)
         mode = mode.lower()
         mode = mode.lower()

+ 3 - 1
paddlex/paddleseg/models/ann.py

@@ -143,7 +143,9 @@ class ANNHead(nn.Layer):
                 psp_size=psp_size))
 
         self.cls = nn.Conv2D(
-            in_channels=inter_channels, out_channels=num_classes, kernel_size=1)
+            in_channels=inter_channels,
+            out_channels=num_classes,
+            kernel_size=1)
         self.auxlayer = layers.AuxLayer(
             in_channels=low_in_channels,
             inter_channels=low_in_channels // 2,

+ 22 - 11
paddlex/paddleseg/models/attention_unet.py

@@ -103,16 +103,20 @@ class AttentionBlock(nn.Layer):
     def __init__(self, F_g, F_l, F_out):
         super().__init__()
         self.W_g = nn.Sequential(
-            nn.Conv2D(F_g, F_out, kernel_size=1, stride=1, padding=0),
+            nn.Conv2D(
+                F_g, F_out, kernel_size=1, stride=1, padding=0),
             nn.BatchNorm2D(F_out))
 
         self.W_x = nn.Sequential(
-            nn.Conv2D(F_l, F_out, kernel_size=1, stride=1, padding=0),
+            nn.Conv2D(
+                F_l, F_out, kernel_size=1, stride=1, padding=0),
             nn.BatchNorm2D(F_out))
 
         self.psi = nn.Sequential(
-            nn.Conv2D(F_out, 1, kernel_size=1, stride=1, padding=0),
-            nn.BatchNorm2D(1), nn.Sigmoid())
+            nn.Conv2D(
+                F_out, 1, kernel_size=1, stride=1, padding=0),
+            nn.BatchNorm2D(1),
+            nn.Sigmoid())
 
         self.relu = nn.ReLU()
 
@@ -129,9 +133,12 @@ class UpConv(nn.Layer):
     def __init__(self, ch_in, ch_out):
         super().__init__()
         self.up = nn.Sequential(
-            nn.Upsample(scale_factor=2, mode="bilinear"),
-            nn.Conv2D(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2D(ch_out), nn.ReLU())
+            nn.Upsample(
+                scale_factor=2, mode="bilinear"),
+            nn.Conv2D(
+                ch_in, ch_out, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2D(ch_out),
+            nn.ReLU())
 
     def forward(self, x):
         return self.up(x)
@@ -169,10 +176,14 @@ class ConvBlock(nn.Layer):
     def __init__(self, ch_in, ch_out):
         super(ConvBlock, self).__init__()
         self.conv = nn.Sequential(
-            nn.Conv2D(ch_in, ch_out, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2D(ch_out), nn.ReLU(),
-            nn.Conv2D(ch_out, ch_out, kernel_size=3, stride=1, padding=1),
-            nn.BatchNorm2D(ch_out), nn.ReLU())
+            nn.Conv2D(
+                ch_in, ch_out, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2D(ch_out),
+            nn.ReLU(),
+            nn.Conv2D(
+                ch_out, ch_out, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2D(ch_out),
+            nn.ReLU())
 
     def forward(self, x):
         return self.conv(x)

+ 2 - 1
paddlex/paddleseg/models/backbones/hrnet.py

@@ -24,7 +24,8 @@ from paddlex.paddleseg.utils import utils
 
 __all__ = [
     "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30",
-    "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64"
+    "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60",
+    "HRNet_W64"
 ]


+ 2 - 1
paddlex/paddleseg/models/backbones/mobilenetv3.py

@@ -103,7 +103,8 @@ class MobileNetV3(nn.Layer):
                 [5, 144, 48, True, "hard_swish", 1],  # output 3 -> out_index=7
                 [5, 288, 96, True, "hard_swish", 2],
                 [5, 576, 96, True, "hard_swish", 1],
-                [5, 576, 96, True, "hard_swish", 1],  # output 4 -> out_index=10
+                [5, 576, 96, True, "hard_swish", 1
+                 ],  # output 4 -> out_index=10
             ]
             self.out_indices = [0, 3, 7, 10]
             self.feat_channels = [

+ 12 - 9
paddlex/paddleseg/models/backbones/resnet_vd.py

@@ -35,8 +35,7 @@ class ConvBNLayer(nn.Layer):
             dilation=1,
             groups=1,
             is_vd_mode=False,
-            act=None,
-    ):
+            act=None, ):
         super(ConvBNLayer, self).__init__()
 
         self.is_vd_mode = is_vd_mode
@@ -215,13 +214,13 @@ class ResNet_vd(nn.Layer):
             depth = [3, 8, 36, 3]
         elif layers == 200:
             depth = [3, 12, 48, 3]
-        num_channels = [64, 256, 512, 1024
-                        ] if layers >= 50 else [64, 64, 128, 256]
+        num_channels = [64, 256, 512,
+                        1024] if layers >= 50 else [64, 64, 128, 256]
         num_filters = [64, 128, 256, 512]
 
         # for channels of four returned stages
-        self.feat_channels = [c * 4 for c in num_filters
-                              ] if layers >= 50 else num_filters
+        self.feat_channels = [c * 4 for c in
+                              num_filters] if layers >= 50 else num_filters
 
         dilation_dict = None
         if output_stride == 8:
@@ -230,7 +229,11 @@ class ResNet_vd(nn.Layer):
             dilation_dict = {3: 2}
 
         self.conv1_1 = ConvBNLayer(
-            in_channels=3, out_channels=32, kernel_size=3, stride=2, act='relu')
+            in_channels=3,
+            out_channels=32,
+            kernel_size=3,
+            stride=2,
+            act='relu')
         self.conv1_2 = ConvBNLayer(
             in_channels=32,
             out_channels=32,
@@ -277,8 +280,8 @@ class ResNet_vd(nn.Layer):
                             in_channels=num_channels[block]
                             if i == 0 else num_filters[block] * 4,
                             out_channels=num_filters[block],
-                            stride=2 if i == 0 and block != 0
-                            and dilation_rate == 1 else 1,
+                            stride=2 if i == 0 and block != 0 and
+                            dilation_rate == 1 else 1,
                             shortcut=shortcut,
                             if_first=block == i == 0,
                             dilation=dilation_rate))

+ 19 - 11
paddlex/paddleseg/models/bisenet.py

@@ -109,7 +109,8 @@ class StemBlock(nn.Layer):
 
         self.left = nn.Sequential(
             layers.ConvBNReLU(out_dim, out_dim // 2, 1),
-            layers.ConvBNReLU(out_dim // 2, out_dim, 3, stride=2))
+            layers.ConvBNReLU(
+                out_dim // 2, out_dim, 3, stride=2))
 
         self.right = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
 
@@ -167,12 +168,14 @@ class GatherAndExpansionLayer2(nn.Layer):
 
         self.branch_1 = nn.Sequential(
             layers.ConvBNReLU(in_dim, in_dim, 3),
-            layers.DepthwiseConvBN(in_dim, expand_dim, 3, stride=2),
+            layers.DepthwiseConvBN(
+                in_dim, expand_dim, 3, stride=2),
             layers.DepthwiseConvBN(expand_dim, expand_dim, 3),
             layers.ConvBN(expand_dim, out_dim, 1))
 
         self.branch_2 = nn.Sequential(
-            layers.DepthwiseConvBN(in_dim, in_dim, 3, stride=2),
+            layers.DepthwiseConvBN(
+                in_dim, in_dim, 3, stride=2),
             layers.ConvBN(in_dim, out_dim, 1))
 
     def forward(self, x):
@@ -189,17 +192,19 @@ class DetailBranch(nn.Layer):
 
         self.convs = nn.Sequential(
             # stage 1
-            layers.ConvBNReLU(3, C1, 3, stride=2),
+            layers.ConvBNReLU(
+                3, C1, 3, stride=2),
             layers.ConvBNReLU(C1, C1, 3),
             # stage 2
-            layers.ConvBNReLU(C1, C2, 3, stride=2),
+            layers.ConvBNReLU(
+                C1, C2, 3, stride=2),
             layers.ConvBNReLU(C2, C2, 3),
             layers.ConvBNReLU(C2, C2, 3),
             # stage 3
-            layers.ConvBNReLU(C2, C3, 3, stride=2),
+            layers.ConvBNReLU(
+                C2, C3, 3, stride=2),
             layers.ConvBNReLU(C3, C3, 3),
-            layers.ConvBNReLU(C3, C3, 3),
-        )
+            layers.ConvBNReLU(C3, C3, 3), )
 
     def forward(self, x):
         return self.convs(x)
@@ -252,12 +257,15 @@ class BGA(nn.Layer):
             nn.Conv2D(out_dim, out_dim, 1))
 
         self.db_branch_down = nn.Sequential(
-            layers.ConvBN(out_dim, out_dim, 3, stride=2),
-            nn.AvgPool2D(kernel_size=3, stride=2, padding=1))
+            layers.ConvBN(
+                out_dim, out_dim, 3, stride=2),
+            nn.AvgPool2D(
+                kernel_size=3, stride=2, padding=1))
 
         self.sb_branch_keep = nn.Sequential(
             layers.DepthwiseConvBN(out_dim, out_dim, 3),
-            nn.Conv2D(out_dim, out_dim, 1), layers.Activation(act='sigmoid'))
+            nn.Conv2D(out_dim, out_dim, 1),
+            layers.Activation(act='sigmoid'))
 
         self.sb_branch_up = layers.ConvBN(out_dim, out_dim, 3)


+ 5 - 3
paddlex/paddleseg/models/danet.py

@@ -50,7 +50,9 @@ class DANet(nn.Layer):
 
         self.backbone = backbone
         self.backbone_indices = backbone_indices
-        in_channels = [self.backbone.feat_channels[i] for i in backbone_indices]
+        in_channels = [
+            self.backbone.feat_channels[i] for i in backbone_indices
+        ]
 
         self.head = DAHead(num_classes=num_classes, in_channels=in_channels)
 
@@ -205,8 +207,8 @@ class CAM(nn.Layer):
         # sim: n, c, c
         sim = paddle.bmm(query, key)
         # The danet author claims that this can avoid gradient divergence
-        sim = paddle.max(
-            sim, axis=-1, keepdim=True).tile([1, 1, self.channels]) - sim
+        sim = paddle.max(sim, axis=-1, keepdim=True).tile(
+            [1, 1, self.channels]) - sim
         sim = F.softmax(sim, axis=-1)
 
         # feat: from (n, c, h * w) to (n, c, h, w)

+ 13 - 6
paddlex/paddleseg/models/decoupled_segnet.py

@@ -126,14 +126,17 @@ class DecoupledSegNetHead(nn.Layer):
                 in_channels=256,
                 out_channels=48,
                 kernel_size=3,
-                bias_attr=False), nn.Conv2D(48, 1, 1, bias_attr=False))
+                bias_attr=False),
+            nn.Conv2D(
+                48, 1, 1, bias_attr=False))
         self.dsn_seg_body = nn.Sequential(
             layers.ConvBNReLU(
                 in_channels=256,
                 out_channels=256,
                 kernel_size=3,
-                bias_attr=False), nn.Conv2D(
-                    256, num_classes, 1, bias_attr=False))
+                bias_attr=False),
+            nn.Conv2D(
+                256, num_classes, 1, bias_attr=False))
 
         self.final_seg = nn.Sequential(
             layers.ConvBNReLU(
@@ -146,7 +149,8 @@ class DecoupledSegNetHead(nn.Layer):
                 out_channels=256,
                 kernel_size=3,
                 bias_attr=False),
-            nn.Conv2D(256, num_classes, kernel_size=1, bias_attr=False))
+            nn.Conv2D(
+                256, num_classes, kernel_size=1, bias_attr=False))
 
     def forward(self, feat_list):
         fine_fea = feat_list[self.backbone_indices[0]]
@@ -163,7 +167,9 @@ class DecoupledSegNetHead(nn.Layer):
             fine_size[2:],
             mode='bilinear',
             align_corners=self.align_corners)
-        seg_edge = self.edge_fusion(paddle.concat([seg_edge, fine_fea], axis=1))
+        seg_edge = self.edge_fusion(
+            paddle.concat(
+                [seg_edge, fine_fea], axis=1))
         seg_edge_out = self.edge_out(seg_edge)
         seg_edge_out = self.sigmoid_edge(seg_edge_out)  # seg_edge output
         seg_body_out = self.dsn_seg_body(seg_body)  # body out
@@ -218,7 +224,8 @@ class SqueezeBodyEdge(nn.Layer):
         h_grid = h_grid.tile([size[1]])
         w_grid = paddle.linspace(-1.0, 1.0, size[1]).reshape([-1, 1])
         w_grid = w_grid.tile([size[0]]).transpose([1, 0])
-        grid = paddle.concat([w_grid.unsqueeze(2), h_grid.unsqueeze(2)], axis=2)
+        grid = paddle.concat(
+            [w_grid.unsqueeze(2), h_grid.unsqueeze(2)], axis=2)
         grid.unsqueeze(0).tile([input_shape[0], 1, 1, 1])
         grid = grid + paddle.transpose(flow, (0, 2, 3, 1)) / norm


+ 2 - 1
paddlex/paddleseg/models/deeplab.py

@@ -117,7 +117,8 @@ class DeepLabV3PHead(nn.Layer):
             align_corners,
             use_sep_conv=True,
             image_pooling=True)
-        self.decoder = Decoder(num_classes, backbone_channels[0], align_corners)
+        self.decoder = Decoder(num_classes, backbone_channels[0],
+                               align_corners)
         self.backbone_indices = backbone_indices
 
     def forward(self, feat_list):

+ 9 - 4
paddlex/paddleseg/models/dnlnet.py

@@ -60,7 +60,9 @@ class DNLNet(nn.Layer):
         super().__init__()
         self.backbone = backbone
         self.backbone_indices = backbone_indices
-        in_channels = [self.backbone.feat_channels[i] for i in backbone_indices]
+        in_channels = [
+            self.backbone.feat_channels[i] for i in backbone_indices
+        ]
         self.head = DNLHead(num_classes, in_channels, reduction, use_scale,
                             mode, temperature, concat_input,
                             enable_auxiliary_loss)
@@ -143,7 +145,8 @@ class DNLHead(nn.Layer):
                 in_channels=1024,
                 out_channels=256,
                 kernel_size=3,
-                bias_attr=False), nn.Dropout2D(p=0.1),
+                bias_attr=False),
+            nn.Dropout2D(p=0.1),
             nn.Conv2D(256, num_classes, 1))
         if self.concat_input:
             self.conv_cat = layers.ConvBNReLU(
@@ -196,14 +199,16 @@ class DisentangledNonLocal2D(layers.NonLocal2D):
             theta_x = paddle.transpose(
                 x.reshape([0, self.in_channels, -1]), [0, 2, 1])
             if self.sub_sample:
-                phi_x = paddle.transpose(self.phi(x), [0, self.in_channels, -1])
+                phi_x = paddle.transpose(
+                    self.phi(x), [0, self.in_channels, -1])
             else:
                 phi_x = paddle.transpose(x, [0, self.in_channels, -1])
 
         elif self.mode == "concatenation":
             theta_x = paddle.reshape(
                 self.theta(x), [0, self.inter_channels, -1, 1])
-            phi_x = paddle.reshape(self.phi(x), [0, self.inter_channels, 1, -1])
+            phi_x = paddle.reshape(
+                self.phi(x), [0, self.inter_channels, 1, -1])
 
         else:
             theta_x = self.theta(x).reshape([0, self.inter_channels,

+ 10 - 6
paddlex/paddleseg/models/emanet.py

@@ -63,10 +63,12 @@ class EMANet(nn.Layer):
 
         self.backbone = backbone
         self.backbone_indices = backbone_indices
-        in_channels = [self.backbone.feat_channels[i] for i in backbone_indices]
-        self.head = EMAHead(num_classes, in_channels, ema_channels, gc_channels,
-                            num_bases, stage_num, momentum, concat_input,
-                            enable_auxiliary_loss)
+        in_channels = [
+            self.backbone.feat_channels[i] for i in backbone_indices
+        ]
+        self.head = EMAHead(num_classes, in_channels, ema_channels,
+                            gc_channels, num_bases, stage_num, momentum,
+                            concat_input, enable_auxiliary_loss)
         self.align_corners = align_corners
         self.pretrained = pretrained
         self.init_weight()
@@ -127,7 +129,8 @@ class EMAHead(nn.Layer):
             in_channels=self.in_channels,
             out_channels=ema_channels,
             kernel_size=3)
-        self.ema_mid_conv = nn.Conv2D(ema_channels, ema_channels, kernel_size=1)
+        self.ema_mid_conv = nn.Conv2D(
+            ema_channels, ema_channels, kernel_size=1)
         self.ema_out_conv = layers.ConvBNReLU(
             in_channels=ema_channels, out_channels=ema_channels, kernel_size=1)
         self.bottleneck = layers.ConvBNReLU(
@@ -137,7 +140,8 @@ class EMAHead(nn.Layer):
         self.aux = nn.Sequential(
             layers.ConvBNReLU(
                 in_channels=1024, out_channels=256, kernel_size=3),
-            nn.Dropout2D(p=0.1), nn.Conv2D(256, num_classes, 1))
+            nn.Dropout2D(p=0.1),
+            nn.Conv2D(256, num_classes, 1))
         if self.concat_input:
             self.conv_cat = layers.ConvBNReLU(
                 self.in_channels + gc_channels, gc_channels, kernel_size=3)

+ 3 - 1
paddlex/paddleseg/models/fast_scnn.py

@@ -304,7 +304,9 @@ class Classifier(nn.Layer):
             padding=1)
 
         self.conv = nn.Conv2D(
-            in_channels=input_channels, out_channels=num_classes, kernel_size=1)
+            in_channels=input_channels,
+            out_channels=num_classes,
+            kernel_size=1)
 
         self.dropout = nn.Dropout(p=0.1)  # dropout_prob


+ 2 - 1
paddlex/paddleseg/models/gcnet.py

@@ -188,7 +188,8 @@ class GlobalContextBlock(nn.Layer):
                 in_channels=in_channels,
                 out_channels=inter_channels,
                 kernel_size=1),
-            nn.LayerNorm(normalized_shape=[inter_channels, 1, 1]), nn.ReLU(),
+            nn.LayerNorm(normalized_shape=[inter_channels, 1, 1]),
+            nn.ReLU(),
             nn.Conv2D(
                 in_channels=inter_channels,
                 out_channels=in_channels,

+ 7 - 3
paddlex/paddleseg/models/gscnn.py

@@ -237,9 +237,13 @@ class GatedSpatailConv2d(nn.Layer):
         super().__init__()
         self._gate_conv = nn.Sequential(
             layers.SyncBatchNorm(in_channels + 1),
-            nn.Conv2D(in_channels + 1, in_channels + 1, kernel_size=1),
-            nn.ReLU(), nn.Conv2D(in_channels + 1, 1, kernel_size=1),
-            layers.SyncBatchNorm(1), nn.Sigmoid())
+            nn.Conv2D(
+                in_channels + 1, in_channels + 1, kernel_size=1),
+            nn.ReLU(),
+            nn.Conv2D(
+                in_channels + 1, 1, kernel_size=1),
+            layers.SyncBatchNorm(1),
+            nn.Sigmoid())
         self.conv = nn.Conv2D(
             in_channels,
             out_channels,

+ 2 - 1
paddlex/paddleseg/models/hardnet.py

@@ -255,7 +255,8 @@ class HarDBlock(nn.Layer):
 
             self.links.append(link)
             layers_.append(
-                layers.ConvBNReLU(inch, outch, kernel_size=3, bias_attr=False))
+                layers.ConvBNReLU(
+                    inch, outch, kernel_size=3, bias_attr=False))
             if (i % 2 == 0) or (i == n_layers - 1):
                 self.out_channels += outch
         self.layers = nn.LayerList(layers_)

+ 17 - 12
paddlex/paddleseg/models/isanet.py

@@ -54,9 +54,11 @@ class ISANet(nn.Layer):
 
         self.backbone = backbone
         self.backbone_indices = backbone_indices
-        in_channels = [self.backbone.feat_channels[i] for i in backbone_indices]
-        self.head = ISAHead(num_classes, in_channels, isa_channels, down_factor,
-                            enable_auxiliary_loss)
+        in_channels = [
+            self.backbone.feat_channels[i] for i in backbone_indices
+        ]
+        self.head = ISAHead(num_classes, in_channels, isa_channels,
+                            down_factor, enable_auxiliary_loss)
         self.align_corners = align_corners
         self.pretrained = pretrained
         self.init_weight()
@@ -114,7 +116,8 @@ class ISAHead(nn.Layer):
                 in_channels=1024,
                 out_channels=256,
                 kernel_size=3,
-                bias_attr=False), nn.Dropout2D(p=0.1),
+                bias_attr=False),
+            nn.Dropout2D(p=0.1),
             nn.Conv2D(256, num_classes, 1))
 
     def forward(self, feat_list):
@@ -127,22 +130,24 @@ class ISAHead(nn.Layer):
         pad_h, pad_w = (Q_h * P_h - x_shape[2]).astype('int32'), (
             Q_w * P_w - x_shape[3]).astype('int32')
         if pad_h > 0 or pad_w > 0:
-            padding = paddle.concat([
-                pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
-            ],
-                                    axis=0)
+            padding = paddle.concat(
+                [
+                    pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
+                    pad_h - pad_h // 2
+                ],
+                axis=0)
             feat = F.pad(x, padding)
         else:
             feat = x
 
         feat = feat.reshape([0, x_shape[1], Q_h, P_h, Q_w, P_w])
-        feat = feat.transpose([0, 3, 5, 1, 2,
-                               4]).reshape([-1, self.inter_channels, Q_h, Q_w])
+        feat = feat.transpose(
+            [0, 3, 5, 1, 2, 4]).reshape([-1, self.inter_channels, Q_h, Q_w])
         feat = self.global_relation(feat)
 
         feat = feat.reshape([x_shape[0], P_h, P_w, x_shape[1], Q_h, Q_w])
-        feat = feat.transpose([0, 4, 5, 3, 1,
-                               2]).reshape([-1, self.inter_channels, P_h, P_w])
+        feat = feat.transpose(
+            [0, 4, 5, 3, 1, 2]).reshape([-1, self.inter_channels, P_h, P_w])
         feat = self.local_relation(feat)
 
         feat = feat.reshape([x_shape[0], Q_h, Q_w, x_shape[1], P_h, P_w])
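The transpose/reshape pairs in this hunk implement ISA's interlaced partition, and they are exact inverses of each other. A minimal numpy sketch with hypothetical sizes (not the PaddleSeg tensors) showing the round trip:

import numpy as np

# Hypothetical sizes: an (N, C, H, W) map split into a Q_h x Q_w grid of
# blocks, each P_h x P_w pixels (P is the down_factor block size).
N, C, H, W = 1, 4, 8, 8
P_h = P_w = 2
Q_h, Q_w = H // P_h, W // P_w

x = np.random.rand(N, C, H, W).astype('float32')
feat = x.reshape(N, C, Q_h, P_h, Q_w, P_w)

# "global relation" layout: one pixel from every block forms a Q_h x Q_w map
g = feat.transpose(0, 3, 5, 1, 2, 4).reshape(-1, C, Q_h, Q_w)
# ... attention across blocks would run here ...

# undo the permutation and recover the original tensor exactly
back = g.reshape(N, P_h, P_w, C, Q_h, Q_w).transpose(0, 3, 4, 1, 5, 2)
assert np.array_equal(x, back.reshape(N, C, H, W))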

+ 2 - 2
paddlex/paddleseg/models/layers/activation.py

@@ -60,8 +60,8 @@ class Activation(nn.Layer):
         if act is not None:
             if act in act_dict.keys():
                 act_name = act_dict[act]
-                self.act_func = eval(
-                    "nn.layer.activation.{}()".format(act_name))
+                self.act_func = eval("nn.layer.activation.{}()".format(
+                    act_name))
             else:
                 raise KeyError("{} does not exist in the current {}".format(
                     act, act_dict.keys()))
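The eval call being re-wrapped here instantiates an activation layer from its class name. A standalone sketch of the same dispatch written with getattr instead of eval (the act_dict below is an assumed two-entry example, not the full table from activation.py):

import paddle.nn as nn

act_dict = {'relu': 'ReLU', 'sigmoid': 'Sigmoid'}  # assumed subset

def make_activation(act):
    if act not in act_dict:
        raise KeyError("{} does not exist in the current {}".format(
            act, act_dict.keys()))
    # getattr lookup, equivalent to eval("nn.layer.activation.{}()".format(...))
    return getattr(nn.layer.activation, act_dict[act])()

relu = make_activation('relu')  # an nn.ReLU instance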

+ 2 - 1
paddlex/paddleseg/models/layers/layer_libs.py

@@ -21,7 +21,8 @@ import paddle.nn.functional as F
 
 def SyncBatchNorm(*args, **kwargs):
     """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead"""
-    if paddle.get_device() == 'cpu' or os.environ.get('PADDLESEG_EXPORT_STAGE'):
+    if paddle.get_device() == 'cpu' or os.environ.get(
+            'PADDLESEG_EXPORT_STAGE'):
         return nn.BatchNorm2D(*args, **kwargs)
     else:
         return nn.SyncBatchNorm(*args, **kwargs)
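A short usage sketch of this factory (illustrative, not part of the commit): on a CPU device, or when the PADDLESEG_EXPORT_STAGE environment variable is set during export, the call silently falls back to BatchNorm2D because SyncBatchNorm has no CPU kernel:

import paddle
from paddlex.paddleseg.models import layers

paddle.set_device('cpu')        # force the CPU code path
bn = layers.SyncBatchNorm(64)   # 64 channels
print(type(bn).__name__)        # BatchNorm2D on CPU, SyncBatchNorm on GPU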

+ 2 - 1
paddlex/paddleseg/models/layers/nonlocal2d.py

@@ -135,7 +135,8 @@ class NonLocal2D(nn.Layer):
         elif self.mode == 'concatenation':
             theta_x = paddle.reshape(
                 self.theta(x), [n, self.inter_channels, -1, 1])
-            phi_x = paddle.reshape(self.phi(x), [n, self.inter_channels, 1, -1])
+            phi_x = paddle.reshape(
+                self.phi(x), [n, self.inter_channels, 1, -1])
 
         else:
             theta_x = paddle.reshape(

+ 2 - 1
paddlex/paddleseg/models/losses/cross_entropy_loss.py

@@ -34,7 +34,8 @@ class CrossEntropyLoss(nn.Layer):
             the top k percent pixels (e.g., the top 20% pixels). This is useful for hard pixel mining.
     """
 
-    def __init__(self, weight=None, ignore_index=255, top_k_percent_pixels=1.0):
+    def __init__(self, weight=None, ignore_index=255,
+                 top_k_percent_pixels=1.0):
         super(CrossEntropyLoss, self).__init__()
         if weight is not None:
             weight = paddle.to_tensor(weight, dtype='float32')

+ 4 - 4
paddlex/paddleseg/models/losses/lovasz_loss.py

@@ -77,8 +77,8 @@ class LovaszHingeLoss(nn.Layer):
         """
         """
         if logits.shape[1] == 2:
         if logits.shape[1] == 2:
             logits = binary_channel_to_unary(logits)
             logits = binary_channel_to_unary(logits)
-        loss = lovasz_hinge_flat(
-            *flatten_binary_scores(logits, labels, self.ignore_index))
+        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels,
+                                                        self.ignore_index))
         return loss
         return loss
 
 
 
 
@@ -181,8 +181,8 @@ def lovasz_softmax_flat(probas, labels, classes='present'):
         else:
         else:
             class_pred = probas[:, c]
             class_pred = probas[:, c]
         errors = paddle.abs(fg - class_pred)
         errors = paddle.abs(fg - class_pred)
-        errors_sorted, perm = paddle.fluid.core.ops.argsort(
-            errors, 'axis', 0, 'descending', True)
+        errors_sorted, perm = paddle.fluid.core.ops.argsort(errors, 'axis', 0,
+                                                            'descending', True)
         errors_sorted.stop_gradient = False
         errors_sorted.stop_gradient = False
 
 
         fg_sorted = paddle.gather(fg, perm)
         fg_sorted = paddle.gather(fg, perm)

+ 5 - 3
paddlex/paddleseg/models/ocrnet.py

@@ -55,7 +55,9 @@ class OCRNet(nn.Layer):
 
         self.backbone = backbone
         self.backbone_indices = backbone_indices
-        in_channels = [self.backbone.feat_channels[i] for i in backbone_indices]
+        in_channels = [
+            self.backbone.feat_channels[i] for i in backbone_indices
+        ]
 
         self.head = OCRHead(
             num_classes=num_classes,
@@ -239,8 +241,8 @@ class ObjectAttentionBlock(nn.Layer):
         # context from (n, h1*w1, key_channels) to (n , out_channels, h1, w1)
         context = paddle.bmm(sim_map, value)
         context = paddle.transpose(context, (0, 2, 1))
-        context = paddle.reshape(context,
-                                 (0, self.key_channels, x_shape[2], x_shape[3]))
+        context = paddle.reshape(
+            context, (0, self.key_channels, x_shape[2], x_shape[3]))
         context = self.f_up(context)
 
         return context

+ 2 - 2
paddlex/paddleseg/models/pspnet.py

@@ -58,8 +58,8 @@ class PSPNet(nn.Layer):
             backbone.feat_channels[i] for i in backbone_indices
         ]
 
-        self.head = PSPNetHead(num_classes, backbone_indices, backbone_channels,
-                               pp_out_channels, bin_sizes,
+        self.head = PSPNetHead(num_classes, backbone_indices,
+                               backbone_channels, pp_out_channels, bin_sizes,
                                enable_auxiliary_loss, align_corners)
         self.align_corners = align_corners
         self.pretrained = pretrained

+ 10 - 6
paddlex/paddleseg/models/sfnet.py

@@ -132,12 +132,15 @@ class SFNetHead(nn.Layer):
         for i in range(len(fpn_inplanes) - 1):
         for i in range(len(fpn_inplanes) - 1):
             self.fpn_out.append(
             self.fpn_out.append(
                 nn.Sequential(
                 nn.Sequential(
-                    layers.ConvBNReLU(fpn_dim, fpn_dim, 3, bias_attr=False)))
+                    layers.ConvBNReLU(
+                        fpn_dim, fpn_dim, 3, bias_attr=False)))
             self.fpn_out_align.append(
             self.fpn_out_align.append(
-                AlignedModule(inplane=fpn_dim, outplane=fpn_dim // 2))
+                AlignedModule(
+                    inplane=fpn_dim, outplane=fpn_dim // 2))
             if self.enable_auxiliary_loss:
             if self.enable_auxiliary_loss:
                 self.dsn.append(
                 self.dsn.append(
-                    nn.Sequential(layers.AuxLayer(fpn_dim, fpn_dim, num_class)))
+                    nn.Sequential(
+                        layers.AuxLayer(fpn_dim, fpn_dim, num_class)))
 
         self.fpn_out = nn.LayerList(self.fpn_out)
         self.fpn_out_align = nn.LayerList(self.fpn_out_align)
@@ -148,7 +151,8 @@ class SFNetHead(nn.Layer):
         self.conv_last = nn.Sequential(
             layers.ConvBNReLU(
                 len(fpn_inplanes) * fpn_dim, fpn_dim, 3, bias_attr=False),
-            nn.Conv2D(fpn_dim, num_class, kernel_size=1))
+            nn.Conv2D(
+                fpn_dim, num_class, kernel_size=1))
 
     def forward(self, conv_out):
         psp_out = self.ppm(conv_out[-1])
@@ -211,8 +215,8 @@ class AlignedModule(nn.Layer):
         norm = paddle.to_tensor([[[[out_w, out_h]]]]).astype('float32')
         h = paddle.linspace(-1.0, 1.0, out_h).reshape([-1, 1]).tile([1, out_w])
         w = paddle.linspace(-1.0, 1.0, out_w).tile([out_h, 1])
-        grid = paddle.concat([paddle.unsqueeze(w, 2),
-                              paddle.unsqueeze(h, 2)], 2)
+        grid = paddle.concat([paddle.unsqueeze(w, 2), paddle.unsqueeze(h, 2)],
+                             2)
         grid = grid.tile([n, 1, 1, 1]).astype('float32')
         grid = grid + flow.transpose([0, 2, 3, 1]) / norm
         output = F.grid_sample(inputs, grid)
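
For context, AlignedModule warps features with a predicted flow field: it builds a normalized sampling grid in [-1, 1], offsets it by the flow, and resamples with F.grid_sample. A minimal sketch of the lines above using a zero flow (demo sizes only), which should reproduce the input exactly:

    import paddle
    import paddle.nn.functional as F

    n, c, out_h, out_w = 1, 3, 4, 4
    inputs = paddle.rand([n, c, out_h, out_w])
    flow = paddle.zeros([n, 2, out_h, out_w])   # zero flow = identity warp

    norm = paddle.to_tensor([[[[out_w, out_h]]]]).astype('float32')
    h = paddle.linspace(-1.0, 1.0, out_h).reshape([-1, 1]).tile([1, out_w])
    w = paddle.linspace(-1.0, 1.0, out_w).tile([out_h, 1])
    grid = paddle.concat([paddle.unsqueeze(w, 2), paddle.unsqueeze(h, 2)], 2)
    grid = grid.tile([n, 1, 1, 1]).astype('float32')
    grid = grid + flow.transpose([0, 2, 3, 1]) / norm

    output = F.grid_sample(inputs, grid)
    print(paddle.allclose(output, inputs).numpy())  # True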

+ 26 - 8
paddlex/paddleseg/models/shufflenet_slim.py

@@ -35,15 +35,32 @@ class ShuffleNetV2(nn.Layer):
         self.conv_bn1 = _ConvBNReLU(36, 18, 1, 1, 0)
 
         self.block1 = nn.Sequential(
-            SFNetV2Module(36, stride=2, out_channels=72),
-            SFNetV2Module(72, stride=1), SFNetV2Module(72, stride=1),
-            SFNetV2Module(72, stride=1))
+            SFNetV2Module(
+                36, stride=2, out_channels=72),
+            SFNetV2Module(
+                72, stride=1),
+            SFNetV2Module(
+                72, stride=1),
+            SFNetV2Module(
+                72, stride=1))
 
         self.block2 = nn.Sequential(
-            SFNetV2Module(72, stride=2), SFNetV2Module(144, stride=1),
-            SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1),
-            SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1),
-            SFNetV2Module(144, stride=1), SFNetV2Module(144, stride=1))
+            SFNetV2Module(
+                72, stride=2),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1),
+            SFNetV2Module(
+                144, stride=1))
 
         self.depthwise_separable0 = _SeparableConvBNReLU(144, 64, 3, stride=1)
         self.depthwise_separable1 = _SeparableConvBNReLU(82, 64, 3, stride=1)
@@ -227,7 +244,8 @@ class SFNetV2Module(nn.Layer):
         h, w = out_shape[2], out_shape[3]
         output = paddle.reshape(x=output, shape=[0, 2, self.in_channels, h, w])
         output = paddle.transpose(x=output, perm=[0, 2, 1, 3, 4])
-        output = paddle.reshape(x=output, shape=[0, 2 * self.in_channels, h, w])
+        output = paddle.reshape(
+            x=output, shape=[0, 2 * self.in_channels, h, w])
         return output
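
The reshape/transpose/reshape triplet at the end of SFNetV2Module is the standard ShuffleNetV2 channel shuffle. A standalone sketch with made-up sizes (in_channels=2, i.e. four channels split into two groups):

    import paddle

    n, in_channels, h, w = 1, 2, 2, 2
    x = paddle.arange(n * 2 * in_channels * h * w, dtype='float32')
    x = x.reshape([n, 2 * in_channels, h, w])

    out = paddle.reshape(x, shape=[0, 2, in_channels, h, w])  # split groups
    out = paddle.transpose(out, perm=[0, 2, 1, 3, 4])         # interleave
    out = paddle.reshape(out, shape=[0, 2 * in_channels, h, w])
    # channels are now ordered [0, 2, 1, 3] relative to the input
    print(out[0, :, 0, 0].numpy())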
 
 

+ 22 - 13
paddlex/paddleseg/models/unet_3plus.py

@@ -62,8 +62,10 @@ class UNet3Plus(nn.Layer):
             self.deepsup = DeepSup(self.up_channels, self.filters, num_classes)
             if self.is_CGM:
                 self.cls = nn.Sequential(
-                    nn.Dropout(p=0.5), nn.Conv2D(self.filters[4], 2, 1),
-                    nn.AdaptiveMaxPool2D(1), nn.Sigmoid())
+                    nn.Dropout(p=0.5),
+                    nn.Conv2D(self.filters[4], 2, 1),
+                    nn.AdaptiveMaxPool2D(1),
+                    nn.Sigmoid())
         else:
             self.outconv1 = nn.Conv2D(
                 self.up_channels, num_classes, 3, padding=1)
@@ -93,7 +95,8 @@ class UNet3Plus(nn.Layer):
                 cls_branch = self.cls(hds[-1]).squeeze(3).squeeze(
                     2)  # (B,N,1,1)->(B,N)
                 cls_branch_max = cls_branch.argmax(axis=1)
-                cls_branch_max = cls_branch_max.reshape((-1, 1)).astype('float')
+                cls_branch_max = cls_branch_max.reshape(
+                    (-1, 1)).astype('float')
                 out = [self.dotProduct(d, cls_branch_max) for d in out]
         else:
             out = [self.outconv1(hds[0])]  # d1->320*320*num_classes
@@ -185,7 +188,8 @@ class Decoder(nn.Layer):
         self.hd4_UT_hd1 = nn.Upsample(scale_factor=8, mode='bilinear')  # 14*14
         self.hd4_UT_hd1_cbr = ConvBnReLU2D(up_channels, cat_channels)
         # hd5->20*20, hd1->320*320, Upsample 16 times
-        self.hd5_UT_hd1 = nn.Upsample(scale_factor=16, mode='bilinear')  # 14*14
+        self.hd5_UT_hd1 = nn.Upsample(
+            scale_factor=16, mode='bilinear')  # 14*14
         self.hd5_UT_hd1_cbr = ConvBnReLU2D(filters[4], cat_channels)
         # fusion(h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1)
         self.cbr1d_1 = ConvBnReLU2D(up_channels, up_channels)  # 16
@@ -217,8 +221,9 @@ class Decoder(nn.Layer):
         hd5_UT_hd2 = self.hd5_UT_hd2_cbr(self.hd5_UT_hd2(hd5))
         # hd2->160*160*up_channels
         hd2 = self.cbr2d_1(
-            paddle.concat(
-                [h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2], 1))
+            paddle.concat([
+                h1_PT_hd2, h2_Cat_hd2, hd3_UT_hd2, hd4_UT_hd2, hd5_UT_hd2
+            ], 1))
         h1_Cat_hd1 = self.h1_Cat_hd1_cbr(h1)
         hd2_UT_hd1 = self.hd2_UT_hd1_cbr(self.hd2_UT_hd1(hd2))
         hd3_UT_hd1 = self.hd3_UT_hd1_cbr(self.hd3_UT_hd1(hd3))
@@ -226,9 +231,9 @@ class Decoder(nn.Layer):
         hd5_UT_hd1 = self.hd5_UT_hd1_cbr(self.hd5_UT_hd1(hd5))
         # hd1->320*320*up_channels
         hd1 = self.cbr1d_1(
-            paddle.concat(
-                [h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1],
-                1))
+            paddle.concat([
+                h1_Cat_hd1, hd2_UT_hd1, hd3_UT_hd1, hd4_UT_hd1, hd5_UT_hd1
+            ], 1))
         return [hd1, hd2, hd3, hd4, hd5]
 
 
@@ -254,15 +259,19 @@ class DeepSup(nn.Layer):
 class ConvBnReLU2D(nn.Sequential):
     def __init__(self, in_channels, out_channels):
         super(ConvBnReLU2D, self).__init__(
-            nn.Conv2D(in_channels, out_channels, 3, padding=1),
-            nn.BatchNorm(out_channels), nn.ReLU())
+            nn.Conv2D(
+                in_channels, out_channels, 3, padding=1),
+            nn.BatchNorm(out_channels),
+            nn.ReLU())
 
 
 class ConvUp2D(nn.Sequential):
     def __init__(self, in_channels, out_channels, scale_factor):
         super(ConvUp2D, self).__init__(
-            nn.Conv2D(in_channels, out_channels, 3, padding=1),
-            nn.Upsample(scale_factor=scale_factor, mode='bilinear'))
+            nn.Conv2D(
+                in_channels, out_channels, 3, padding=1),
+            nn.Upsample(
+                scale_factor=scale_factor, mode='bilinear'))
 
 
 class MaxPoolConv2D(nn.Sequential):
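
For reference, the classification-guided module (is_CGM) touched in the UNet3Plus.forward hunk above turns the cls branch into a per-image 0/1 flag that scales every deep-supervision output; the dotProduct call reduces to a broadcast multiply. A sketch with demo tensors, not the repository's code:

    import paddle

    n, num_classes, h, w = 2, 3, 4, 4
    seg = paddle.rand([n, num_classes, h, w])          # one decoder output d
    cls_branch_max = paddle.to_tensor([[1.0], [0.0]])  # (B, 1) presence flags

    # broadcast the per-image flag over C, H, W
    out = seg * cls_branch_max.reshape([n, 1, 1, 1])
    print(float(out[1].abs().sum()))  # second image is zeroed out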

+ 4 - 3
paddlex/paddleseg/models/unet_plusplus.py

@@ -195,9 +195,10 @@ class DoubleConv(nn.Layer):
         super(DoubleConv, self).__init__()
         self.conv = nn.Sequential(
             nn.Conv2D(in_channels, out_channels, filter_size, stride, padding),
-            SyncBatchNorm(out_channels), nn.ReLU(),
-            nn.Conv2D(out_channels, out_channels, filter_size, stride, padding),
-            SyncBatchNorm(out_channels), nn.ReLU())
+            SyncBatchNorm(out_channels),
+            nn.ReLU(),
+            nn.Conv2D(out_channels, out_channels, filter_size, stride,
+                      padding), SyncBatchNorm(out_channels), nn.ReLU())
 
     def forward(self, inputs):
         conv = self.conv(inputs)

+ 5 - 4
paddlex/paddleseg/transforms/functional.py

@@ -128,11 +128,12 @@ def onehot_to_binary_edge(mask, radius):
 
     edge = np.zeros(mask.shape[1:])
     # pad borders
-    mask = np.pad(
-        mask, ((0, 0), (1, 1), (1, 1)), mode='constant', constant_values=0)
+    mask = np.pad(mask, ((0, 0), (1, 1), (1, 1)),
+                  mode='constant',
+                  constant_values=0)
     for i in range(num_classes):
-        dist = distance_transform_edt(
-            mask[i, :]) + distance_transform_edt(1.0 - mask[i, :])
+        dist = distance_transform_edt(mask[i, :]) + distance_transform_edt(
+            1.0 - mask[i, :])
         dist = dist[1:-1, 1:-1]
         dist[dist > radius] = 0
         edge += dist
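
The onehot_to_binary_edge logic above works because the two distance transforms measure, for every pixel, how far it sits from the class boundary on its own side; zeroing values beyond radius leaves a band of edge pixels. A standalone sketch, assuming scipy is available as in the original module:

    import numpy as np
    from scipy.ndimage import distance_transform_edt

    radius = 2
    mask = np.zeros((1, 8, 8), dtype='uint8')
    mask[0, 2:6, 2:6] = 1  # one-hot mask with a single class

    edge = np.zeros(mask.shape[1:])
    padded = np.pad(mask, ((0, 0), (1, 1), (1, 1)), mode='constant',
                    constant_values=0)
    for i in range(mask.shape[0]):
        dist = distance_transform_edt(padded[i, :]) + distance_transform_edt(
            1.0 - padded[i, :])
        dist = dist[1:-1, 1:-1]
        dist[dist > radius] = 0
        edge += dist
    edge = (edge > 0).astype('uint8')
    print(edge.sum())  # count of pixels within `radius` of the boundary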

+ 13 - 10
paddlex/paddleseg/transforms/transforms.py

@@ -282,7 +282,8 @@ class LimitLong:
         if target != long_edge:
             im = functional.resize_long(im, target)
             if label is not None:
-                label = functional.resize_long(label, target, cv2.INTER_NEAREST)
+                label = functional.resize_long(label, target,
+                                               cv2.INTER_NEAREST)
 
         if label is None:
             return (im, )
@@ -303,8 +304,8 @@ class ResizeRangeScaling:
     def __init__(self, min_value=400, max_value=600):
         if min_value > max_value:
             raise ValueError('min_value must be less than max_value, '
-                             'but they are {} and {}.'.format(
-                                 min_value, max_value))
+                             'but they are {} and {}.'.format(min_value,
+                                                              max_value))
         self.min_value = min_value
         self.max_value = max_value
 
@@ -415,8 +416,9 @@ class Normalize:
     def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)):
         self.mean = mean
         self.std = std
-        if not (isinstance(self.mean, (list, tuple))
-                and isinstance(self.std, (list, tuple))):
+        if not (isinstance(self.mean,
+                           (list, tuple)) and isinstance(self.std,
+                                                         (list, tuple))):
             raise ValueError(
                 "{}: input type is invalid. It should be list or tuple".format(
                     self))
@@ -562,9 +564,10 @@ class PaddingByAspectRatio:
             img_height = int(img_width / self.aspect_ratio)
         else:
             img_width = int(img_height * self.aspect_ratio)
-        padding = Padding((img_width, img_height),
-                          im_padding_value=self.im_padding_value,
-                          label_padding_value=self.label_padding_value)
+        padding = Padding(
+            (img_width, img_height),
+            im_padding_value=self.im_padding_value,
+            label_padding_value=self.label_padding_value)
         return padding(im, label)
 
 
@@ -655,8 +658,8 @@ class RandomPaddingCrop:
                 h_off = np.random.randint(img_height - crop_height + 1)
                 w_off = np.random.randint(img_width - crop_width + 1)
 
-                im = im[h_off:(crop_height + h_off), w_off:(
-                    w_off + crop_width), :]
+                im = im[h_off:(crop_height + h_off), w_off:(w_off + crop_width
+                                                            ), :]
                 if label is not None:
                     label = label[h_off:(crop_height + h_off), w_off:(
                         w_off + crop_width)]
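
The RandomPaddingCrop hunk above is pure re-wrapping; the crop itself is a plain numpy slice applied identically to image and label. A standalone sketch with demo sizes:

    import numpy as np

    crop_height, crop_width = 4, 4
    im = np.arange(6 * 8 * 3).reshape(6, 8, 3)  # HWC image
    label = np.arange(6 * 8).reshape(6, 8)

    h_off = np.random.randint(im.shape[0] - crop_height + 1)
    w_off = np.random.randint(im.shape[1] - crop_width + 1)

    im = im[h_off:(crop_height + h_off), w_off:(w_off + crop_width), :]
    label = label[h_off:(crop_height + h_off), w_off:(w_off + crop_width)]
    print(im.shape, label.shape)  # (4, 4, 3) (4, 4)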

+ 2 - 1
paddlex/paddleseg/utils/config_check.py

@@ -37,7 +37,8 @@ def num_classes_check(cfg, train_dataset, val_dataset):
         num_classes_set.add(train_dataset.num_classes)
     if val_dataset and hasattr(val_dataset, 'num_classes'):
         num_classes_set.add(val_dataset.num_classes)
-    if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes', None):
+    if cfg.dic.get('model', None) and cfg.dic['model'].get('num_classes',
+                                                           None):
         num_classes_set.add(cfg.dic['model'].get('num_classes'))
     if (not cfg.train_dataset) and (not cfg.val_dataset):
         raise ValueError(

+ 4 - 4
paddlex/paddleseg/utils/download.py

@@ -102,8 +102,8 @@ def _uncompress_file(filepath, extrapath, delete_file, print_progress):
     for total_num, index, rootpath in handler(filepath, extrapath):
         if print_progress:
             done = int(50 * float(index) / total_num)
-            progress(
-                "[%-50s] %.2f%%" % ('=' * done, float(100 * index) / total_num))
+            progress("[%-50s] %.2f%%" %
+                     ('=' * done, float(100 * index) / total_num))
     if print_progress:
         progress("[%-50s] %.2f%%" % ('=' * 50, 100), end=True)
 
@@ -133,8 +133,8 @@ def download_file_and_uncompress(url,
     savepath = os.path.join(savepath, savename)
     savename = ".".join(savename.split(".")[:-1])
     savename = os.path.join(extrapath, savename)
-    extraname = savename if extraname is None else os.path.join(
-        extrapath, extraname)
+    extraname = savename if extraname is None else os.path.join(extrapath,
+                                                                extraname)
 
     if cover:
         if os.path.exists(savepath):

+ 2 - 3
paddlex/paddleseg/utils/logger.py

@@ -27,9 +27,8 @@ def log(level=2, message=""):
         time_array = time.localtime(current_time)
         current_time = time.strftime("%Y-%m-%d %H:%M:%S", time_array)
         if log_level >= level:
-            print(
-                "{} [{}]\t{}".format(current_time, levels[level],
-                                     message).encode("utf-8").decode("latin1"))
+            print("{} [{}]\t{}".format(current_time, levels[level], message)
+                  .encode("utf-8").decode("latin1"))
             sys.stdout.flush()
 
 

+ 2 - 2
paddlex/paddleseg/utils/metrics.py

@@ -38,8 +38,8 @@ def calculate_area(pred, label, num_classes, ignore_index=255):
         label = paddle.squeeze(label, axis=1)
     if not pred.shape == label.shape:
         raise ValueError('Shape of `pred` and `label should be equal, '
-                         'but there are {} and {}.'.format(
-                             pred.shape, label.shape))
+                         'but there are {} and {}.'.format(pred.shape,
+                                                           label.shape))
 
     # Delete ignore_index
     mask = label != ignore_index
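
For reference, the mask built after this check drops every pixel labelled ignore_index before per-class areas are accumulated. A minimal sketch of that idea with demo tensors (simplified relative to the real calculate_area):

    import paddle

    ignore_index = 255
    pred = paddle.to_tensor([0, 1, 1, 2])
    label = paddle.to_tensor([0, 1, 255, 2])

    mask = label != ignore_index
    pred = paddle.masked_select(pred, mask)
    label = paddle.masked_select(label, mask)
    print(pred.numpy(), label.numpy())  # the ignored pixel is gone from both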

+ 18 - 15
paddlex/paddleseg/utils/progbar.py

@@ -52,11 +52,10 @@ class Progbar(object):
         else:
             self.stateful_metrics = set()
 
-        self._dynamic_display = ((hasattr(sys.stderr, 'isatty')
-                                  and sys.stderr.isatty())
-                                 or 'ipykernel' in sys.modules
-                                 or 'posix' in sys.modules
-                                 or 'PYCHARM_HOSTED' in os.environ)
+        self._dynamic_display = (
+            (hasattr(sys.stderr, 'isatty') and
+             sys.stderr.isatty()) or 'ipykernel' in sys.modules or
+            'posix' in sys.modules or 'PYCHARM_HOSTED' in os.environ)
         self._total_width = 0
         self._seen_so_far = 0
         # We use a dict + list to avoid garbage collection
@@ -122,7 +121,8 @@ class Progbar(object):
 
             if self.target is not None:
                 numdigits = int(np.log10(self.target)) + 1
-                bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
+                bar = ('%' + str(numdigits) + 'd/%d [') % (current,
+                                                           self.target)
                 prog = float(current) / self.target
                 prog_width = int(self.width * prog)
                 if prog_width > 0:
@@ -148,14 +148,16 @@ class Progbar(object):
                 if time_per_unit >= 1 or time_per_unit == 0:
                     info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
                 elif time_per_unit >= 1e-3:
-                    info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
+                    info += ' %.0fms/%s' % (time_per_unit * 1e3,
+                                            self.unit_name)
                 else:
-                    info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
+                    info += ' %.0fus/%s' % (time_per_unit * 1e6,
+                                            self.unit_name)
             else:
                 eta = time_per_unit * (self.target - current)
                 if eta > 3600:
-                    eta_format = '%d:%02d:%02d' % (eta // 3600,
-                                                   (eta % 3600) // 60, eta % 60)
+                    eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) //
+                                                   60, eta % 60)
                 elif eta > 60:
                     eta_format = '%d:%02d' % (eta // 60, eta % 60)
                 else:
@@ -166,8 +168,8 @@ class Progbar(object):
             for k in self._values_order:
                 info += ' - %s:' % k
                 if isinstance(self._values[k], list):
-                    avg = np.mean(
-                        self._values[k][0] / max(1, self._values[k][1]))
+                    avg = np.mean(self._values[k][0] /
+                                  max(1, self._values[k][1]))
                     if abs(avg) > 1e-3:
                         info += ' %.4f' % avg
                     else:
@@ -188,12 +190,13 @@ class Progbar(object):
         elif self.verbose == 2:
             if finalize:
                 numdigits = int(np.log10(self.target)) + 1
-                count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
+                count = ('%' + str(numdigits) + 'd/%d') % (current,
+                                                           self.target)
                 info = count + info
                 for k in self._values_order:
                     info += ' - %s:' % k
-                    avg = np.mean(
-                        self._values[k][0] / max(1, self._values[k][1]))
+                    avg = np.mean(self._values[k][0] /
+                                  max(1, self._values[k][1]))
                     if avg > 1e-3:
                         info += ' %.4f' % avg
                     else:
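
The progbar.py changes are line-wrapping only; the ETA formatting they touch reduces to a small helper. A standalone sketch of the same branching:

    def eta_format(eta):
        """Format a remaining-time estimate in seconds, as in Progbar.update."""
        if eta > 3600:
            return '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)
        elif eta > 60:
            return '%d:%02d' % (eta // 60, eta % 60)
        return '%ds' % eta

    print(eta_format(5025))  # 1:23:45
    print(eta_format(83))    # 1:23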

+ 11 - 11
paddlex/paddleseg/utils/utils.py

@@ -26,7 +26,7 @@ from paddlex.paddleseg.utils.download import download_file_and_uncompress
 
 
 @contextlib.contextmanager
-def generate_tempdir(directory: str = None, **kwargs):
+def generate_tempdir(directory: str=None, **kwargs):
     '''Generate a temporary directory'''
     directory = seg_env.TMP_HOME if not directory else directory
     with tempfile.TemporaryDirectory(dir=directory, **kwargs) as _dir:
@@ -43,7 +43,8 @@ def load_entire_model(model, pretrained):
 
 def load_pretrained_model(model, pretrained_model):
     if pretrained_model is not None:
-        logger.info('Loading pretrained model from {}'.format(pretrained_model))
+        logger.info('Loading pretrained model from {}'.format(
+            pretrained_model))
         # download pretrained model from url
         if urlparse(pretrained_model).netloc:
             pretrained_model = unquote(pretrained_model)
@@ -73,24 +74,23 @@ def load_pretrained_model(model, pretrained_model):
             for k in keys:
                 if k not in para_state_dict:
                     logger.warning("{} is not in pretrained model".format(k))
-                elif list(para_state_dict[k].shape) != list(
-                        model_state_dict[k].shape):
+                elif list(para_state_dict[k].shape) != list(model_state_dict[k]
+                                                            .shape):
                     logger.warning(
                         "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
-                        .format(k, para_state_dict[k].shape,
-                                model_state_dict[k].shape))
+                        .format(k, para_state_dict[k].shape, model_state_dict[
+                            k].shape))
                 else:
                     model_state_dict[k] = para_state_dict[k]
                     num_params_loaded += 1
             model.set_dict(model_state_dict)
             logger.info("There are {}/{} variables loaded into {}.".format(
-                num_params_loaded, len(model_state_dict),
-                model.__class__.__name__))
+                num_params_loaded,
+                len(model_state_dict), model.__class__.__name__))
 
         else:
-            raise ValueError(
-                'The pretrained model directory is not Found: {}'.format(
-                    pretrained_model))
+            raise ValueError('The pretrained model directory is not Found: {}'.
+                             format(pretrained_model))
     else:
         logger.info(
             'No pretrained model to load, {} will be trained from scratch.'.
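
A sketch of the shape-checked loading in load_pretrained_model above: keys missing from the checkpoint are warned about, shape mismatches are skipped, everything else is copied. Plain dicts stand in for real state dicts here:

    import numpy as np

    para_state_dict = {'conv.w': np.zeros((3, 3)), 'fc.w': np.zeros((10, 5))}
    model_state_dict = {'conv.w': np.ones((3, 3)), 'fc.w': np.ones((10, 2)),
                        'head.b': np.ones(2)}

    num_params_loaded = 0
    for k in model_state_dict:
        if k not in para_state_dict:
            print("{} is not in pretrained model".format(k))
        elif list(para_state_dict[k].shape) != list(model_state_dict[k].shape):
            print("[SKIP] Shape of pretrained params {} doesn't match.".format(k))
        else:
            model_state_dict[k] = para_state_dict[k]
            num_params_loaded += 1
    print("{}/{} variables loaded.".format(num_params_loaded,
                                           len(model_state_dict)))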

+ 18 - 6
paddlex/ppcls/data/imaug/operators.py

@@ -195,10 +195,18 @@ class NormalizeImage(object):
     """ normalize image such as substract mean, divide std
     """
 
-    def __init__(self, scale=None, mean=None, std=None, order='chw', output_fp16=False, channel_num=3):
+    def __init__(self,
+                 scale=None,
+                 mean=None,
+                 std=None,
+                 order='chw',
+                 output_fp16=False,
+                 channel_num=3):
         if isinstance(scale, str):
             scale = eval(scale)
-        assert channel_num in [3, 4], "channel number of input image should be set to 3 or 4."
+        assert channel_num in [
+            3, 4
+        ], "channel number of input image should be set to 3 or 4."
         self.channel_num = channel_num
         self.output_dtype = 'float16' if output_fp16 else 'float32'
         self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
@@ -217,15 +225,19 @@ class NormalizeImage(object):
 
         assert isinstance(img,
                           np.ndarray), "invalid input 'img' in NormalizeImage"
-        
+
         img = (img.astype('float32') * self.scale - self.mean) / self.std
 
         if self.channel_num == 4:
             img_h = img.shape[1] if self.order == 'chw' else img.shape[0]
             img_w = img.shape[2] if self.order == 'chw' else img.shape[1]
-            pad_zeros = np.zeros((1, img_h, img_w)) if self.order == 'chw' else np.zeros((img_h, img_w, 1))
-            img = (np.concatenate((img, pad_zeros), axis=0) if self.order == 'chw'
-                   else np.concatenate((img, pad_zeros), axis=2))
+            pad_zeros = np.zeros(
+                (1, img_h, img_w)) if self.order == 'chw' else np.zeros(
+                    (img_h, img_w, 1))
+            img = (np.concatenate(
+                (img, pad_zeros), axis=0)
+                   if self.order == 'chw' else np.concatenate(
+                       (img, pad_zeros), axis=2))
         return img.astype(self.output_dtype)
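
The channel_num == 4 branch added to NormalizeImage appends a zero channel after the usual scale/mean/std step, so 3-channel images can feed kernels that expect 4-channel input. A standalone sketch with demo values:

    import numpy as np

    order = 'chw'
    img = np.random.rand(3, 4, 4).astype('float32')  # already-normalized CHW

    img_h = img.shape[1] if order == 'chw' else img.shape[0]
    img_w = img.shape[2] if order == 'chw' else img.shape[1]
    pad_zeros = np.zeros((1, img_h, img_w)) if order == 'chw' else np.zeros(
        (img_h, img_w, 1))
    img = (np.concatenate((img, pad_zeros), axis=0)
           if order == 'chw' else np.concatenate((img, pad_zeros), axis=2))
    print(img.shape)  # (4, 4, 4)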
 
 

+ 339 - 279
paddlex/ppcls/modeling/architectures/inception_v3.py

@@ -16,7 +16,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-
 import paddle
 from paddle import ParamAttr
 import paddle.nn as nn
@@ -48,53 +47,60 @@ class ConvBNLayer(nn.Layer):
             stride=stride,
             padding=padding,
             groups=groups,
-            weight_attr=ParamAttr(name=name+"_weights"),
+            weight_attr=ParamAttr(name=name + "_weights"),
             bias_attr=False)
         self.batch_norm = BatchNorm(
             num_filters,
             act=act,
-            param_attr=ParamAttr(name=name+"_bn_scale"),
-            bias_attr=ParamAttr(name=name+"_bn_offset"),
-            moving_mean_name=name+"_bn_mean",
-            moving_variance_name=name+"_bn_variance")
+            param_attr=ParamAttr(name=name + "_bn_scale"),
+            bias_attr=ParamAttr(name=name + "_bn_offset"),
+            moving_mean_name=name + "_bn_mean",
+            moving_variance_name=name + "_bn_variance")
 
     def forward(self, inputs):
         y = self.conv(inputs)
         y = self.batch_norm(y)
         return y
 
+
 class InceptionStem(nn.Layer):
     def __init__(self):
         super(InceptionStem, self).__init__()
-        self.conv_1a_3x3 = ConvBNLayer(num_channels=3,
-                                       num_filters=32,
-                                       filter_size=3,
-                                       stride=2,
-                                       act="relu",
-                                       name="conv_1a_3x3")
-        self.conv_2a_3x3 = ConvBNLayer(num_channels=32,
-                                       num_filters=32,
-                                       filter_size=3,
-                                       stride=1,
-                                       act="relu",
-                                       name="conv_2a_3x3")
-        self.conv_2b_3x3 = ConvBNLayer(num_channels=32,
-                                       num_filters=64,
-                                       filter_size=3,
-                                       padding=1,
-                                       act="relu",
-                                       name="conv_2b_3x3")
+        self.conv_1a_3x3 = ConvBNLayer(
+            num_channels=3,
+            num_filters=32,
+            filter_size=3,
+            stride=2,
+            act="relu",
+            name="conv_1a_3x3")
+        self.conv_2a_3x3 = ConvBNLayer(
+            num_channels=32,
+            num_filters=32,
+            filter_size=3,
+            stride=1,
+            act="relu",
+            name="conv_2a_3x3")
+        self.conv_2b_3x3 = ConvBNLayer(
+            num_channels=32,
+            num_filters=64,
+            filter_size=3,
+            padding=1,
+            act="relu",
+            name="conv_2b_3x3")
         self.maxpool = MaxPool2D(kernel_size=3, stride=2, padding=0)
-        self.conv_3b_1x1 = ConvBNLayer(num_channels=64,
-                                       num_filters=80,
-                                       filter_size=1,
-                                       act="relu",
-                                       name="conv_3b_1x1")        
-        self.conv_4a_3x3 = ConvBNLayer(num_channels=80,
-                                       num_filters=192,
-                                       filter_size=3,
-                                       act="relu",
-                                       name="conv_4a_3x3")
+        self.conv_3b_1x1 = ConvBNLayer(
+            num_channels=64,
+            num_filters=80,
+            filter_size=1,
+            act="relu",
+            name="conv_3b_1x1")
+        self.conv_4a_3x3 = ConvBNLayer(
+            num_channels=80,
+            num_filters=192,
+            filter_size=3,
+            act="relu",
+            name="conv_4a_3x3")
+
     def forward(self, x):
         y = self.conv_1a_3x3(x)
         y = self.conv_2a_3x3(y)
@@ -105,50 +111,59 @@ class InceptionStem(nn.Layer):
         y = self.maxpool(y)
         return y
 
-                         
+
 class InceptionA(nn.Layer):
 class InceptionA(nn.Layer):
     def __init__(self, num_channels, pool_features, name=None):
         super(InceptionA, self).__init__()
-                                     num_filters=64,
-                                     filter_size=1,
-                                     act="relu",
-                                     name="inception_a_branch1x1_"+name) 
-        self.branch5x5_1 = ConvBNLayer(num_channels=num_channels,
-                                       num_filters=48, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_a_branch5x5_1_"+name)
-        self.branch5x5_2 = ConvBNLayer(num_channels=48, 
-                                       num_filters=64, 
-                                       filter_size=5, 
-                                       padding=2, 
-                                       act="relu",
-                                       name="inception_a_branch5x5_2_"+name)
-
-        self.branch3x3dbl_1 = ConvBNLayer(num_channels=num_channels,
-                                       num_filters=64, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_a_branch3x3dbl_1_"+name)
-        self.branch3x3dbl_2 = ConvBNLayer(num_channels=64,
-                                       num_filters=96, 
-                                       filter_size=3, 
-                                       padding=1,
-                                       act="relu",
-                                       name="inception_a_branch3x3dbl_2_"+name)
-        self.branch3x3dbl_3 = ConvBNLayer(num_channels=96,
-                               num_filters=96, 
-                               filter_size=3, 
-                               padding=1,
-                               act="relu",
-                               name="inception_a_branch3x3dbl_3_"+name)
-        self.branch_pool = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(num_channels=num_channels,
-                               num_filters=pool_features, 
-                               filter_size=1, 
-                               act="relu",
-                               name="inception_a_branch_pool_"+name)
+        self.branch1x1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=64,
+            filter_size=1,
+            act="relu",
+            name="inception_a_branch1x1_" + name)
+        self.branch5x5_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=48,
+            filter_size=1,
+            act="relu",
+            name="inception_a_branch5x5_1_" + name)
+        self.branch5x5_2 = ConvBNLayer(
+            num_channels=48,
+            num_filters=64,
+            filter_size=5,
+            padding=2,
+            act="relu",
+            name="inception_a_branch5x5_2_" + name)
+
+        self.branch3x3dbl_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=64,
+            filter_size=1,
+            act="relu",
+            name="inception_a_branch3x3dbl_1_" + name)
+        self.branch3x3dbl_2 = ConvBNLayer(
+            num_channels=64,
+            num_filters=96,
+            filter_size=3,
+            padding=1,
+            act="relu",
+            name="inception_a_branch3x3dbl_2_" + name)
+        self.branch3x3dbl_3 = ConvBNLayer(
+            num_channels=96,
+            num_filters=96,
+            filter_size=3,
+            padding=1,
+            act="relu",
+            name="inception_a_branch3x3dbl_3_" + name)
+        self.branch_pool = AvgPool2D(
+            kernel_size=3, stride=1, padding=1, exclusive=False)
+        self.branch_pool_conv = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=pool_features,
+            filter_size=1,
+            act="relu",
+            name="inception_a_branch_pool_" + name)
+
     def forward(self, x):
         branch1x1 = self.branch1x1(x)
         branch5x5 = self.branch5x5_1(x)
@@ -161,38 +176,43 @@ class InceptionA(nn.Layer):
         branch_pool = self.branch_pool(x)
 
         branch_pool = self.branch_pool_conv(branch_pool)
-        outputs = paddle.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1)
+        outputs = paddle.concat(
+            [branch1x1, branch5x5, branch3x3dbl, branch_pool], axis=1)
         return outputs
 
-    
+
 class InceptionB(nn.Layer):
     def __init__(self, num_channels, name=None):
         super(InceptionB, self).__init__()
-        self.branch3x3 = ConvBNLayer(num_channels=num_channels,
-                                     num_filters=384,
-                                     filter_size=3,
-                                     stride=2,
-                                     act="relu",
-                                     name="inception_b_branch3x3_"+name) 
-        self.branch3x3dbl_1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=64, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_b_branch3x3dbl_1_"+name)
-        self.branch3x3dbl_2 = ConvBNLayer(num_channels=64, 
-                                       num_filters=96, 
-                                       filter_size=3, 
-                                       padding=1,
-                                       act="relu",
-                                       name="inception_b_branch3x3dbl_2_"+name)
-        self.branch3x3dbl_3 = ConvBNLayer(num_channels=96, 
-                                       num_filters=96, 
-                                       filter_size=3,
-                                       stride=2,
-                                       act="relu",
-                                       name="inception_b_branch3x3dbl_3_"+name)
+        self.branch3x3 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=384,
+            filter_size=3,
+            stride=2,
+            act="relu",
+            name="inception_b_branch3x3_" + name)
+        self.branch3x3dbl_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=64,
+            filter_size=1,
+            act="relu",
+            name="inception_b_branch3x3dbl_1_" + name)
+        self.branch3x3dbl_2 = ConvBNLayer(
+            num_channels=64,
+            num_filters=96,
+            filter_size=3,
+            padding=1,
+            act="relu",
+            name="inception_b_branch3x3dbl_2_" + name)
+        self.branch3x3dbl_3 = ConvBNLayer(
+            num_channels=96,
+            num_filters=96,
+            filter_size=3,
+            stride=2,
+            act="relu",
+            name="inception_b_branch3x3dbl_3_" + name)
         self.branch_pool = MaxPool2D(kernel_size=3, stride=2)
-        
+
     def forward(self, x):
         branch3x3 = self.branch3x3(x)
 
@@ -206,72 +226,84 @@ class InceptionB(nn.Layer):
 
         return outputs
 
+
 class InceptionC(nn.Layer):
     def __init__(self, num_channels, channels_7x7, name=None):
         super(InceptionC, self).__init__()
-        self.branch1x1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=192, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_c_branch1x1_"+name)
-        self.branch7x7_1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=channels_7x7, 
-                                       filter_size=1, 
-                                       stride=1,
-                                       act="relu",
-                                       name="inception_c_branch7x7_1_"+name)
-        self.branch7x7_2 = ConvBNLayer(num_channels=channels_7x7,
-                                       num_filters=channels_7x7, 
-                                       filter_size=(1, 7), 
-                                       stride=1,
-                                       padding=(0, 3),
-                                       act="relu",
-                                       name="inception_c_branch7x7_2_"+name)
-        self.branch7x7_3 = ConvBNLayer(num_channels=channels_7x7,
-                                       num_filters=192, 
-                                       filter_size=(7, 1), 
-                                       stride=1,
-                                       padding=(3, 0),
-                                       act="relu",
-                                       name="inception_c_branch7x7_3_"+name)
-        
-        self.branch7x7dbl_1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=channels_7x7, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_c_branch7x7dbl_1_"+name)
-        self.branch7x7dbl_2 = ConvBNLayer(num_channels=channels_7x7,  
-                                       num_filters=channels_7x7, 
-                                       filter_size=(7, 1), 
-                                       padding = (3, 0),
-                                       act="relu",
-                                       name="inception_c_branch7x7dbl_2_"+name)
-        self.branch7x7dbl_3 = ConvBNLayer(num_channels=channels_7x7, 
-                                       num_filters=channels_7x7, 
-                                       filter_size=(1, 7), 
-                                       padding = (0, 3),
-                                       act="relu",
-                                       name="inception_c_branch7x7dbl_3_"+name)
-        self.branch7x7dbl_4 = ConvBNLayer(num_channels=channels_7x7,  
-                                       num_filters=channels_7x7, 
-                                       filter_size=(7, 1), 
-                                       padding = (3, 0),
-                                       act="relu",
-                                       name="inception_c_branch7x7dbl_4_"+name)
-        self.branch7x7dbl_5 = ConvBNLayer(num_channels=channels_7x7, 
-                                       num_filters=192, 
-                                       filter_size=(1, 7), 
-                                       padding = (0, 3),
-                                       act="relu",
-                                       name="inception_c_branch7x7dbl_5_"+name)
-       
-        self.branch_pool = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(num_channels=num_channels,
-                                       num_filters=192, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_c_branch_pool_"+name)
-        
+        self.branch1x1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=192,
+            filter_size=1,
+            act="relu",
+            name="inception_c_branch1x1_" + name)
+        self.branch7x7_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=channels_7x7,
+            filter_size=1,
+            stride=1,
+            act="relu",
+            name="inception_c_branch7x7_1_" + name)
+        self.branch7x7_2 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=channels_7x7,
+            filter_size=(1, 7),
+            stride=1,
+            padding=(0, 3),
+            act="relu",
+            name="inception_c_branch7x7_2_" + name)
+        self.branch7x7_3 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=192,
+            filter_size=(7, 1),
+            stride=1,
+            padding=(3, 0),
+            act="relu",
+            name="inception_c_branch7x7_3_" + name)
+
+        self.branch7x7dbl_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=channels_7x7,
+            filter_size=1,
+            act="relu",
+            name="inception_c_branch7x7dbl_1_" + name)
+        self.branch7x7dbl_2 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=channels_7x7,
+            filter_size=(7, 1),
+            padding=(3, 0),
+            act="relu",
+            name="inception_c_branch7x7dbl_2_" + name)
+        self.branch7x7dbl_3 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=channels_7x7,
+            filter_size=(1, 7),
+            padding=(0, 3),
+            act="relu",
+            name="inception_c_branch7x7dbl_3_" + name)
+        self.branch7x7dbl_4 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=channels_7x7,
+            filter_size=(7, 1),
+            padding=(3, 0),
+            act="relu",
+            name="inception_c_branch7x7dbl_4_" + name)
+        self.branch7x7dbl_5 = ConvBNLayer(
+            num_channels=channels_7x7,
+            num_filters=192,
+            filter_size=(1, 7),
+            padding=(0, 3),
+            act="relu",
+            name="inception_c_branch7x7dbl_5_" + name)
+
+        self.branch_pool = AvgPool2D(
+            kernel_size=3, stride=1, padding=1, exclusive=False)
+        self.branch_pool_conv = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=192,
+            filter_size=1,
+            act="relu",
+            name="inception_c_branch_pool_" + name)
+
     def forward(self, x):
     def forward(self, x):
         branch1x1 = self.branch1x1(x)
 
         branch_pool = self.branch_pool(x)
         branch_pool = self.branch_pool(x)
         branch_pool = self.branch_pool_conv(branch_pool)
 
-        
+        outputs = paddle.concat(
+            [branch1x1, branch7x7, branch7x7dbl, branch_pool], axis=1)
+
         return outputs
-    
+
+
 class InceptionD(nn.Layer):
 class InceptionD(nn.Layer):
     def __init__(self, num_channels, name=None):
         super(InceptionD, self).__init__()
-                                       num_filters=192, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_d_branch3x3_1_"+name)
-        self.branch3x3_2 = ConvBNLayer(num_channels=192, 
-                                       num_filters=320, 
-                                       filter_size=3, 
-                                       stride=2,
-                                       act="relu",
-                                       name="inception_d_branch3x3_2_"+name)
-        self.branch7x7x3_1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=192, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_d_branch7x7x3_1_"+name)
-        self.branch7x7x3_2 = ConvBNLayer(num_channels=192,
-                                       num_filters=192, 
-                                       filter_size=(1, 7), 
-                                       padding=(0, 3),
-                                       act="relu",
-                                       name="inception_d_branch7x7x3_2_"+name)
-        self.branch7x7x3_3 = ConvBNLayer(num_channels=192, 
-                                       num_filters=192, 
-                                       filter_size=(7, 1), 
-                                       padding=(3, 0),
-                                       act="relu",
-                                       name="inception_d_branch7x7x3_3_"+name)
-        self.branch7x7x3_4 = ConvBNLayer(num_channels=192,  
-                                       num_filters=192, 
-                                       filter_size=3, 
-                                       stride=2,
-                                       act="relu",
-                                       name="inception_d_branch7x7x3_4_"+name)
+        self.branch3x3_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=192,
+            filter_size=1,
+            act="relu",
+            name="inception_d_branch3x3_1_" + name)
+        self.branch3x3_2 = ConvBNLayer(
+            num_channels=192,
+            num_filters=320,
+            filter_size=3,
+            stride=2,
+            act="relu",
+            name="inception_d_branch3x3_2_" + name)
+        self.branch7x7x3_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=192,
+            filter_size=1,
+            act="relu",
+            name="inception_d_branch7x7x3_1_" + name)
+        self.branch7x7x3_2 = ConvBNLayer(
+            num_channels=192,
+            num_filters=192,
+            filter_size=(1, 7),
+            padding=(0, 3),
+            act="relu",
+            name="inception_d_branch7x7x3_2_" + name)
+        self.branch7x7x3_3 = ConvBNLayer(
+            num_channels=192,
+            num_filters=192,
+            filter_size=(7, 1),
+            padding=(3, 0),
+            act="relu",
+            name="inception_d_branch7x7x3_3_" + name)
+        self.branch7x7x3_4 = ConvBNLayer(
+            num_channels=192,
+            num_filters=192,
+            filter_size=3,
+            stride=2,
+            act="relu",
+            name="inception_d_branch7x7x3_4_" + name)
         self.branch_pool = MaxPool2D(kernel_size=3, stride=2)
 
     def forward(self, x):
@@ -341,65 +381,77 @@ class InceptionD(nn.Layer):
         branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
 
         branch_pool = self.branch_pool(x)
-        
+
         outputs = paddle.concat([branch3x3, branch7x7x3, branch_pool], axis=1)
         return outputs
-    
+
+
 class InceptionE(nn.Layer):
     def __init__(self, num_channels, name=None):
         super(InceptionE, self).__init__()
-        self.branch1x1 = ConvBNLayer(num_channels=num_channels,
-                                       num_filters=320, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_e_branch1x1_"+name)
-        self.branch3x3_1 = ConvBNLayer(num_channels=num_channels,
-                                       num_filters=384, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_e_branch3x3_1_"+name)
-        self.branch3x3_2a = ConvBNLayer(num_channels=384, 
-                                       num_filters=384, 
-                                       filter_size=(1, 3), 
-                                       padding=(0, 1),
-                                       act="relu",
-                                       name="inception_e_branch3x3_2a_"+name)
-        self.branch3x3_2b = ConvBNLayer(num_channels=384, 
-                                       num_filters=384, 
-                                       filter_size=(3, 1), 
-                                       padding=(1, 0),
-                                       act="relu",
-                                       name="inception_e_branch3x3_2b_"+name)
-        
-        self.branch3x3dbl_1 = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=448, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_e_branch3x3dbl_1_"+name)
-        self.branch3x3dbl_2 = ConvBNLayer(num_channels=448, 
-                                       num_filters=384, 
-                                       filter_size=3, 
-                                       padding=1,
-                                       act="relu",
-                                       name="inception_e_branch3x3dbl_2_"+name)
-        self.branch3x3dbl_3a = ConvBNLayer(num_channels=384,
-                                       num_filters=384, 
-                                       filter_size=(1, 3), 
-                                       padding=(0, 1),
-                                       act="relu",
-                                       name="inception_e_branch3x3dbl_3a_"+name)
-        self.branch3x3dbl_3b = ConvBNLayer(num_channels=384,
-                                       num_filters=384, 
-                                       filter_size=(3, 1), 
-                                       padding=(1, 0),
-                                       act="relu",
-                                       name="inception_e_branch3x3dbl_3b_"+name)
-        self.branch_pool = AvgPool2D(kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(num_channels=num_channels, 
-                                       num_filters=192, 
-                                       filter_size=1, 
-                                       act="relu",
-                                       name="inception_e_branch_pool_"+name)
+        self.branch1x1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=320,
+            filter_size=1,
+            act="relu",
+            name="inception_e_branch1x1_" + name)
+        self.branch3x3_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=384,
+            filter_size=1,
+            act="relu",
+            name="inception_e_branch3x3_1_" + name)
+        self.branch3x3_2a = ConvBNLayer(
+            num_channels=384,
+            num_filters=384,
+            filter_size=(1, 3),
+            padding=(0, 1),
+            act="relu",
+            name="inception_e_branch3x3_2a_" + name)
+        self.branch3x3_2b = ConvBNLayer(
+            num_channels=384,
+            num_filters=384,
+            filter_size=(3, 1),
+            padding=(1, 0),
+            act="relu",
+            name="inception_e_branch3x3_2b_" + name)
+
+        self.branch3x3dbl_1 = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=448,
+            filter_size=1,
+            act="relu",
+            name="inception_e_branch3x3dbl_1_" + name)
+        self.branch3x3dbl_2 = ConvBNLayer(
+            num_channels=448,
+            num_filters=384,
+            filter_size=3,
+            padding=1,
+            act="relu",
+            name="inception_e_branch3x3dbl_2_" + name)
+        self.branch3x3dbl_3a = ConvBNLayer(
+            num_channels=384,
+            num_filters=384,
+            filter_size=(1, 3),
+            padding=(0, 1),
+            act="relu",
+            name="inception_e_branch3x3dbl_3a_" + name)
+        self.branch3x3dbl_3b = ConvBNLayer(
+            num_channels=384,
+            num_filters=384,
+            filter_size=(3, 1),
+            padding=(1, 0),
+            act="relu",
+            name="inception_e_branch3x3dbl_3b_" + name)
+        self.branch_pool = AvgPool2D(
+            kernel_size=3, stride=1, padding=1, exclusive=False)
+        self.branch_pool_conv = ConvBNLayer(
+            num_channels=num_channels,
+            num_filters=192,
+            filter_size=1,
+            act="relu",
+            name="inception_e_branch_pool_" + name)
+
     def forward(self, x):
         branch1x1 = self.branch1x1(x)

@@ -421,44 +473,53 @@ class InceptionE(nn.Layer):
         branch_pool = self.branch_pool(x)
         branch_pool = self.branch_pool_conv(branch_pool)

-        outputs = paddle.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
-        return outputs   
+        outputs = paddle.concat(
+            [branch1x1, branch3x3, branch3x3dbl, branch_pool], axis=1)
+        return outputs
+

-    
 class InceptionV3(nn.Layer):
     def __init__(self, class_dim=1000):
         super(InceptionV3, self).__init__()
         self.inception_a_list = [[192, 256, 288], [32, 64, 64]]
         self.inception_c_list = [[768, 768, 768, 768], [128, 160, 160, 192]]
-        
+
         self.inception_stem = InceptionStem()
         self.inception_block_list = []
         for i in range(len(self.inception_a_list[0])):
-            inception_a = self.add_sublayer("inception_a_"+str(i+1),
-                                           InceptionA(self.inception_a_list[0][i], 
-                                                      self.inception_a_list[1][i], 
-                                                      name=str(i+1)))
+            inception_a = self.add_sublayer(
+                "inception_a_" + str(i + 1),
+                InceptionA(
+                    self.inception_a_list[0][i],
+                    self.inception_a_list[1][i],
+                    name=str(i + 1)))
             self.inception_block_list.append(inception_a)
-        inception_b = self.add_sublayer("nception_b_1",
-                                        InceptionB(288, name="1"))
+        inception_b = self.add_sublayer(
+            "nception_b_1", InceptionB(
+                288, name="1"))
         self.inception_block_list.append(inception_b)
-        
+
         for i in range(len(self.inception_c_list[0])):
-            inception_c = self.add_sublayer("inception_c_"+str(i+1),
-                                           InceptionC(self.inception_c_list[0][i], 
-                                                      self.inception_c_list[1][i], 
-                                                      name=str(i+1)))
+            inception_c = self.add_sublayer(
+                "inception_c_" + str(i + 1),
+                InceptionC(
+                    self.inception_c_list[0][i],
+                    self.inception_c_list[1][i],
+                    name=str(i + 1)))
             self.inception_block_list.append(inception_c)
-        inception_d = self.add_sublayer("inception_d_1",
-                                        InceptionD(768, name="1"))
+        inception_d = self.add_sublayer(
+            "inception_d_1", InceptionD(
+                768, name="1"))
         self.inception_block_list.append(inception_d)
-        inception_e = self.add_sublayer("inception_e_1", 
-                                        InceptionE(1280, name="1"))
+        inception_e = self.add_sublayer(
+            "inception_e_1", InceptionE(
+                1280, name="1"))
         self.inception_block_list.append(inception_e)
-        inception_e = self.add_sublayer("inception_e_2",
-                                        InceptionE(2048, name="2"))
+        inception_e = self.add_sublayer(
+            "inception_e_2", InceptionE(
+                2048, name="2"))
         self.inception_block_list.append(inception_e)
- 
+
         self.gap = AdaptiveAvgPool2D(1)
         self.drop = Dropout(p=0.2, mode="downscale_in_infer")
         stdv = 1.0 / math.sqrt(2048 * 1.0)
@@ -472,10 +533,9 @@ class InceptionV3(nn.Layer):
     def forward(self, x):
         y = self.inception_stem(x)
         for inception_block in self.inception_block_list:
-           y = inception_block(y)
+            y = inception_block(y)
         y = self.gap(y)
         y = paddle.reshape(y, shape=[-1, 2048])
         y = self.drop(y)
         y = self.out(y)
         return y
-    

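For orientation, a minimal usage sketch of the InceptionV3 assembled above (the import path and the 299x299 input size are assumptions for illustration, not part of the diff):

    import paddle
    from paddlex.ppcls.modeling.architectures.inception_v3 import InceptionV3

    model = InceptionV3(class_dim=1000)
    x = paddle.rand([1, 3, 299, 299])  # assumed canonical Inception-v3 input
    logits = model(x)                  # shape [1, 1000] after gap + fc
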
+ 6 - 5
paddlex/ppcls/modeling/architectures/resnet.py

@@ -183,7 +183,11 @@ class BasicBlock(nn.Layer):


 class ResNet(nn.Layer):
-    def __init__(self, layers=50, class_dim=1000, input_image_channel=3, data_format="NCHW"):
+    def __init__(self,
+                 layers=50,
+                 class_dim=1000,
+                 input_image_channel=3,
+                 data_format="NCHW"):
         super(ResNet, self).__init__()

         self.layers = layers
@@ -216,10 +220,7 @@ class ResNet(nn.Layer):
             name="conv1",
             name="conv1",
             data_format=self.data_format)
             data_format=self.data_format)
         self.pool2d_max = MaxPool2D(
         self.pool2d_max = MaxPool2D(
-            kernel_size=3,
-            stride=2, 
-            padding=1,
-            data_format=self.data_format)
+            kernel_size=3, stride=2, padding=1, data_format=self.data_format)

         self.block_list = []
         if layers >= 50:

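The reformatted constructor above exposes data_format explicitly; a short sketch of how the layout switch would be exercised (import path assumed):

    import paddle
    from paddlex.ppcls.modeling.architectures.resnet import ResNet

    model = ResNet(layers=50, class_dim=1000, input_image_channel=3,
                   data_format="NCHW")
    x = paddle.rand([1, 3, 224, 224])  # NCHW: batch, channels, height, width
    logits = model(x)
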
+ 8 - 2
paddlex/ppcls/modeling/architectures/resnext.py

@@ -135,7 +135,12 @@ class BottleneckBlock(nn.Layer):


 class ResNeXt(nn.Layer):
-    def __init__(self, layers=50, class_dim=1000, cardinality=32, input_image_channel=3, data_format="NCHW"):
+    def __init__(self,
+                 layers=50,
+                 class_dim=1000,
+                 cardinality=32,
+                 input_image_channel=3,
+                 data_format="NCHW"):
         super(ResNeXt, self).__init__()

         self.layers = layers
@@ -168,7 +173,8 @@ class ResNeXt(nn.Layer):
             act='relu',
             name="res_conv1",
             data_format=self.data_format)
-        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1, data_format=self.data_format)
+        self.pool2d_max = MaxPool2D(
+            kernel_size=3, stride=2, padding=1, data_format=self.data_format)

         self.block_list = []
         for block in range(len(depth)):

+ 12 - 8
paddlex/ppcls/modeling/loss.py

@@ -15,7 +15,9 @@
 import paddle
 import paddle.nn.functional as F

-__all__ = ['CELoss', 'MixCELoss', 'GoogLeNetLoss', 'JSDivLoss', 'MultiLabelLoss']
+__all__ = [
+    'CELoss', 'MixCELoss', 'GoogLeNetLoss', 'JSDivLoss', 'MultiLabelLoss'
+]


 class Loss(object):
@@ -41,13 +43,15 @@ class Loss(object):
         soft_target = F.label_smooth(one_hot_target, epsilon=self._epsilon)
         soft_target = paddle.reshape(soft_target, shape=[-1, self._class_dim])
         return soft_target
-    
+
     def _binary_crossentropy(self, input, target):
         if self._label_smoothing:
             target = self._labelsmoothing(target)
-            cost = F.binary_cross_entropy_with_logits(logit=input, label=target)
+            cost = F.binary_cross_entropy_with_logits(
+                logit=input, label=target)
         else:
-            cost = F.binary_cross_entropy_with_logits(logit=input, label=target)
+            cost = F.binary_cross_entropy_with_logits(
+                logit=input, label=target)

         avg_cost = paddle.mean(cost)

@@ -59,7 +63,7 @@ class Loss(object):
             input = -F.log_softmax(input, axis=-1)
             cost = paddle.sum(target * input, axis=-1)
         else:
-            cost = F.cross_entropy(input=input, label=target) 
+            cost = F.cross_entropy(input=input, label=target)
         avg_cost = paddle.mean(cost)
         return avg_cost

@@ -79,8 +83,8 @@ class Loss(object):

     def __call__(self, input, target):
         pass
-    
-    
+
+
 class MultiLabelLoss(Loss):
     """
     Multi-label loss based on binary cross entropy
@@ -119,7 +123,7 @@ class MixCELoss(Loss):
     def __call__(self, input, target0, target1, lam):
         cost0 = self._crossentropy(input, target0)
         cost1 = self._crossentropy(input, target1)
-        cost = lam * cost0 + (1.0 - lam) * cost1  
+        cost = lam * cost0 + (1.0 - lam) * cost1
         avg_cost = paddle.mean(cost)
         return avg_cost


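The loss code above combines label smoothing with a mixup-style interpolation of two cross-entropy terms; a standalone sketch of both formulas with raw Paddle calls (dummy shapes, not the Loss class API):

    import paddle
    import paddle.nn.functional as F

    logits = paddle.rand([4, 10])
    target0 = paddle.randint(0, 10, [4, 1])
    target1 = paddle.randint(0, 10, [4, 1])

    # label smoothing: soft = one_hot * (1 - eps) + eps / num_classes
    soft = F.label_smooth(F.one_hot(target0.flatten(), 10), epsilon=0.1)
    smoothed_ce = paddle.mean(
        paddle.sum(soft * -F.log_softmax(logits, axis=-1), axis=-1))

    # MixCELoss: interpolate the two targets' losses with weight lam
    lam = 0.7
    mix = lam * F.cross_entropy(logits, target0) + \
        (1.0 - lam) * F.cross_entropy(logits, target1)
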
+ 1 - 1
paddlex/ppcls/utils/check.py

@@ -82,7 +82,7 @@ def check_architecture(architecture):

 def check_model_with_running_mode(architecture):
     """
-    check whether the model is consistent with the operating mode 
+    check whether the model is consistent with the operating mode
     """
     """
     # some model are not supported in the static mode
     # some model are not supported in the static mode
     blacklist = get_blacklist_model_in_static_mode()
     blacklist = get_blacklist_model_in_static_mode()

+ 1 - 1
paddlex/ppcls/utils/logger.py

@@ -83,7 +83,7 @@ def scaler(name, value, step, writer):
     This function will draw a scalar curve generated by the visualdl.
     Usage: Install visualdl: pip3 install visualdl==2.0.0b4
            and then:
-           visualdl --logdir ./scalar --host 0.0.0.0 --port 8830 
+           visualdl --logdir ./scalar --host 0.0.0.0 --port 8830
            to preview the loss curve in real time.
     """
     writer.add_scalar(tag=name, step=step, value=value)
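Following the docstring's recipe end to end, a minimal sketch assuming visualdl is installed (log directory and tag names are arbitrary):

    from visualdl import LogWriter

    with LogWriter(logdir="./scalar") as writer:
        for step, loss in enumerate([0.9, 0.7, 0.5]):
            writer.add_scalar(tag="train/loss", step=step, value=loss)
    # then run: visualdl --logdir ./scalar --host 0.0.0.0 --port 8830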

+ 8 - 3
paddlex/ppcls/utils/metrics.py

@@ -25,7 +25,10 @@ from sklearn.preprocessing import binarize

 import numpy as np

-__all__ = ["multi_hot_encode", "hamming_distance", "accuracy_score", "precision_recall_fscore", "mean_average_precision"]
+__all__ = [
+    "multi_hot_encode", "hamming_distance", "accuracy_score",
+    "precision_recall_fscore", "mean_average_precision"
+]


 def multi_hot_encode(logits, threshold=0.5):
@@ -70,7 +73,8 @@ def accuracy_score(output, target, base="sample"):
         tps = mcm[:, 1, 1]
         fps = mcm[:, 0, 1]

-        accuracy = (sum(tps) + sum(tns)) / (sum(tps) + sum(tns) + sum(fns) + sum(fps))
+        accuracy = (sum(tps) + sum(tns)) / (
+            sum(tps) + sum(tns) + sum(fns) + sum(fps))

     return accuracy

@@ -84,7 +88,8 @@ def precision_recall_fscore(output, target):
         fscores:
     """

-    precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output)
+    precisions, recalls, fscores, _ = precision_recall_fscore_support(target,
+                                                                      output)

     return precisions, recalls, fscores

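A small sketch of how these helpers chain for multi-label evaluation (made-up arrays; the thresholding mirrors multi_hot_encode, and the sklearn call is the one imported above):

    import numpy as np
    from sklearn.metrics import precision_recall_fscore_support

    logits = np.array([[0.8, 0.1, 0.6], [0.3, 0.9, 0.2]])
    pred = (logits > 0.5).astype(int)       # multi-hot: [[1 0 1], [0 1 0]]
    target = np.array([[1, 0, 0], [0, 1, 0]])

    p, r, f, _ = precision_recall_fscore_support(target, pred)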

+ 4 - 1
paddlex/ppdet/core/workspace.py

@@ -155,7 +155,10 @@ def merge_config(config, another_cfg=None):


 def get_registered_modules():
-    return {k: v for k, v in global_config.items() if isinstance(v, SchemaDict)}
+    return {
+        k: v
+        for k, v in global_config.items() if isinstance(v, SchemaDict)
+    }


 def make_partial(cls):

+ 12 - 12
paddlex/ppdet/data/__init__.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from . import source

+ 8 - 8
paddlex/ppdet/data/reader.py

@@ -120,7 +120,7 @@ class BaseDataLoader(object):
         collate_batch (bool): whether to collate batch in dataloader.
             If set to True, the samples will collate into batch according
             to the batch size. Otherwise, the ground-truth will not collate,
-            which is used when the number of ground-truch is different in 
+            which is used when the number of ground-truths differs across
             samples.
         use_shared_memory (bool): whether to use shared memory to
                 accelerate data loading, enable this only if you
@@ -146,7 +146,7 @@ class BaseDataLoader(object):
         self._sample_transforms = Compose(
             sample_transforms, num_classes=num_classes)

-        # batch transfrom 
+        # batch transform
         self._batch_transforms = BatchCompose(batch_transforms, num_classes,
                                               collate_batch)
         self.batch_size = batch_size
@@ -282,9 +282,9 @@ class EvalMOTReader(BaseDataLoader):
                  drop_last=False,
                  num_classes=1,
                  **kwargs):
-        super(EvalMOTReader, self).__init__(sample_transforms, batch_transforms,
-                                            batch_size, shuffle, drop_last,
-                                            num_classes, **kwargs)
+        super(EvalMOTReader, self).__init__(
+            sample_transforms, batch_transforms, batch_size, shuffle,
+            drop_last, num_classes, **kwargs)


 @register
@@ -299,6 +299,6 @@ class TestMOTReader(BaseDataLoader):
                  drop_last=False,
                  num_classes=1,
                  **kwargs):
-        super(TestMOTReader, self).__init__(sample_transforms, batch_transforms,
-                                            batch_size, shuffle, drop_last,
-                                            num_classes, **kwargs)
+        super(TestMOTReader, self).__init__(
+            sample_transforms, batch_transforms, batch_size, shuffle,
+            drop_last, num_classes, **kwargs)
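
To make the collate_batch docstring above concrete, a pure-numpy sketch (not the actual BatchCompose code) of why ragged ground truth cannot be stacked:

    import numpy as np

    samples = [{"image": np.zeros((3, 32, 32)), "gt_bbox": np.zeros((2, 4))},
               {"image": np.zeros((3, 32, 32)), "gt_bbox": np.zeros((5, 4))}]

    images = np.stack([s["image"] for s in samples])  # ok: (2, 3, 32, 32)
    gt_bboxes = [s["gt_bbox"] for s in samples]       # 2 vs 5 boxes: keep as list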

+ 12 - 12
paddlex/ppdet/data/source/category.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 24 - 22
paddlex/ppdet/data/source/coco.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import os
@@ -33,11 +33,11 @@ class COCODataSet(DetDataset):
         anno_path (str): coco annotation file path.
         data_fields (list): key name of data dictionary, at least have 'image'.
         sample_num (int): number of samples to load, -1 means all.
-        load_crowd (bool): whether to load crowded ground-truth. 
+        load_crowd (bool): whether to load crowded ground-truth.
             False as default
         allow_empty (bool): whether to load empty entry. False as default
-        empty_ratio (float): the ratio of empty record number to total 
-            record's, if empty_ratio is out of [0. ,1.), do not sample the 
+        empty_ratio (float): the ratio of empty record number to total
+            record's, if empty_ratio is out of [0. ,1.), do not sample the
             records. 1. as default
     """

@@ -90,8 +90,9 @@ class COCODataSet(DetDataset):

         if 'annotations' not in coco.dataset:
             self.load_image_only = True
-            logger.warning('Annotation file: {} does not contains ground truth '
-                           'and load image information only.'.format(anno_path))
+            logger.warning(
+                'Annotation file: {} does not contain ground truth '
+                'and load image information only.'.format(anno_path))

         for img_id in img_ids:
             img_anno = coco.loadImgs([img_id])[0]
@@ -108,9 +109,9 @@ class COCODataSet(DetDataset):
                 continue

             if im_w < 0 or im_h < 0:
-                logger.warning('Illegal width: {} or height: {} in annotation, '
-                               'and im_id: {} will be ignored'.format(
-                                   im_w, im_h, img_id))
+                logger.warning(
+                    'Illegal width: {} or height: {} in annotation, '
+                    'and im_id: {} will be ignored'.format(im_w, im_h, img_id))
                 continue

             coco_rec = {
@@ -122,7 +123,8 @@ class COCODataSet(DetDataset):

             if not self.load_image_only:
                 ins_anno_ids = coco.getAnnIds(
-                    imgIds=[img_id], iscrowd=None if self.load_crowd else False)
+                    imgIds=[img_id],
+                    iscrowd=None if self.load_crowd else False)
                 instances = coco.loadAnns(ins_anno_ids)

                 bboxes = []
@@ -187,7 +189,7 @@ class COCODataSet(DetDataset):
                     if is_rbox_anno:
                         gt_rbox[i, :] = box['clean_rbox']
                     is_crowd[i][0] = box['iscrowd']
-                    # check RLE format 
+                    # check RLE format
                     if 'segmentation' in box and box['iscrowd'] == 1:
                         gt_poly[i] = [[0.0, 0.0], ]
                     elif 'segmentation' in box and box['segmentation']:

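A sketch of the empty_ratio bookkeeping described in the docstring above; the exact sampling code differs, and this keep-count formula is an assumption for illustration only:

    # keep enough empty records that they form `empty_ratio` of the result
    def num_empty_to_keep(num_non_empty, empty_ratio):
        if empty_ratio < 0. or empty_ratio >= 1.:
            return None  # out of [0., 1.): do not sample empty records
        return int(num_non_empty * empty_ratio / (1. - empty_ratio))

    print(num_empty_to_keep(900, 0.1))  # 100 -> empties are 10% of 1000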
+ 12 - 12
paddlex/ppdet/data/source/dataset.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import os

+ 12 - 12
paddlex/ppdet/data/source/keypoint_coco.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import os

+ 5 - 5
paddlex/ppdet/data/source/mot.py

@@ -41,10 +41,10 @@ class MOTDataSet(DetDataset):
         MOT datasets root directory following this:
             dataset/mot
             |——————image_lists
-            |        |——————caltech.train  
-            |        |——————caltech.val   
-            |        |——————mot16.train  
-            |        |——————mot17.train  
+            |        |——————caltech.train
+            |        |——————caltech.val
+            |        |——————mot16.train
+            |        |——————mot17.train
             |        ......
             |——————Caltech
             |——————MOT17
@@ -302,7 +302,7 @@ class MOTVideoDataset(DetDataset):
     Args:
         video_file (str): path of the video file
         dataset_dir (str): root directory for dataset.
-        keep_ori_im (bool): whether to keep original image, default False. 
+        keep_ori_im (bool): whether to keep original image, default False.
             Set True when used during MOT model inference while saving
             images or video, or used in DeepSORT.
     """

+ 2 - 2
paddlex/ppdet/data/source/voc.py

@@ -90,8 +90,8 @@ class VOCDataSet(DetDataset):
                         for x in line.strip().split()[:2]]
                 if not os.path.exists(img_file):
                     logger.warn(
-                        'Illegal image file: {}, and it will be ignored'.format(
-                            img_file))
+                        'Illegal image file: {}, and it will be ignored'.
+                        format(img_file))
                     continue
                 if not os.path.isfile(xml_file):
                     logger.warn('Illegal xml file: {}, and it will be ignored'.

+ 24 - 19
paddlex/ppdet/data/transform/autoaugment_utils.py

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Reference: 
+# Reference:
 #   https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/autoaugment_utils.py
 """AutoAugment util file."""

@@ -67,7 +67,7 @@ def policy_v1():
         [('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 0.0, 10)],
         [('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
         [('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
-        [('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],  # , 
+        [('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)],  # ,
         [('Cutout', 0.2, 2), ('Brightness', 0.8, 10)],
         [('Color', 1.0, 6), ('Equalize', 1.0, 2)],
         [('Cutout_Only_BBoxes', 0.4, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
@@ -113,7 +113,8 @@ def policy_v2():
         [('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
         [('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6),
          ('Rotate_BBox', 0.6, 6)],
-        [('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
+        [('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)
+         ],
         [('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6),
          ('ShearY_BBox', 0.6, 8)],
         [('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
@@ -365,8 +366,10 @@ def random_shift_bbox(image,
                                        2.0))
     maxval_y = clip_y(min_y + np.int32(pixel_scaling * float(bbox_height) /
                                        2.0))
-    minval_x = clip_x(min_x - np.int32(pixel_scaling * float(bbox_width) / 2.0))
-    maxval_x = clip_x(min_x + np.int32(pixel_scaling * float(bbox_width) / 2.0))
+    minval_x = clip_x(min_x - np.int32(pixel_scaling * float(bbox_width) /
+                                       2.0))
+    maxval_x = clip_x(min_x + np.int32(pixel_scaling * float(bbox_width) /
+                                       2.0))

     # Sample and calculate the new unclipped min/max coordinates of the new bbox.
     if new_min_bbox_coords is None:
@@ -403,8 +406,8 @@ def random_shift_bbox(image,
     bbox_content = image[shifted_min_y:shifted_max_y + 1, shifted_min_x:
                          shifted_max_x + 1, :]

-    def mask_and_add_image(min_y_, min_x_, max_y_, max_x_, mask, content_tensor,
-                           image_):
+    def mask_and_add_image(min_y_, min_x_, max_y_, max_x_, mask,
+                           content_tensor, image_):
         """Applies mask to bbox region in image then adds content_tensor to it."""
         """Applies mask to bbox region in image then adds content_tensor to it."""
         mask = np.pad(mask, [[min_y_, (image_height - 1) - max_y_],
         mask = np.pad(mask, [[min_y_, (image_height - 1) - max_y_],
                              [min_x_, (image_width - 1) - max_x_], [0, 0]],
                              [min_x_, (image_width - 1) - max_x_], [0, 0]],
@@ -426,8 +429,8 @@ def random_shift_bbox(image,

     # Fill in bbox content to new bbox location.
     mask = np.zeros_like(bbox_content)
-    image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
-                               bbox_content, image)
+    image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x,
+                               mask, bbox_content, image)

     return image.astype(np.uint8), new_bbox

@@ -609,8 +612,8 @@ def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
             augmented_image, bbox = (image, bbox)
     else:
         if should_apply_op:
-            augmented_image = _apply_bbox_augmentation(image, bbox,
-                                                       augmentation_func, *args)
+            augmented_image = _apply_bbox_augmentation(
+                image, bbox, augmentation_func, *args)
         else:
             augmented_image = image
     new_bboxes = _concat_bbox(bbox, new_bboxes)
@@ -761,16 +764,16 @@ def solarize_only_bboxes(image, bboxes, prob, threshold):
     """Apply solarize to each bbox in the image with probability prob."""
     """Apply solarize to each bbox in the image with probability prob."""
     func_changes_bbox = False
     func_changes_bbox = False
     prob = _scale_bbox_only_op_probability(prob)
     prob = _scale_bbox_only_op_probability(prob)
-    return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, solarize,
-                                                  func_changes_bbox, threshold)
+    return _apply_multi_bbox_augmentation_wrapper(
+        image, bboxes, prob, solarize, func_changes_bbox, threshold)


 def equalize_only_bboxes(image, bboxes, prob):
     """Apply equalize to each bbox in the image with probability prob."""
     func_changes_bbox = False
     prob = _scale_bbox_only_op_probability(prob)
-    return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, equalize,
-                                                  func_changes_bbox)
+    return _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob,
+                                                  equalize, func_changes_bbox)


 def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
@@ -1087,7 +1090,8 @@ def sharpness(image, factor):
     image = image.astype(np.float32)
     # Make image 4D for conv operation.
     # SMOOTH PIL Kernel.
-    kernel = np.array([[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=np.float32) / 13.
+    kernel = np.array(
+        [[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=np.float32) / 13.
     result = cv2.filter2D(image, -1, kernel).astype(np.uint8)

     # Blend the final result.
@@ -1388,9 +1392,10 @@ def _translate_level_to_arg(level, translate_const):


 def _bbox_cutout_level_to_arg(level, hparams):
-    cutout_pad_fraction = (level /
-                           _MAX_LEVEL) * 0.75  # hparams.cutout_max_pad_fraction
-    return (cutout_pad_fraction, False)  # hparams.cutout_bbox_replace_with_mean
+    cutout_pad_fraction = (
+        level / _MAX_LEVEL) * 0.75  # hparams.cutout_max_pad_fraction
+    return (cutout_pad_fraction, False
+            )  # hparams.cutout_bbox_replace_with_mean


 def level_to_arg(hparams):

+ 27 - 17
paddlex/ppdet/data/transform/batch_operators.py

@@ -146,7 +146,8 @@ class BatchRandomResize(BaseOperator):
         else:
             interp = self.interp

-        resizer = Resize(target_size, keep_ratio=self.keep_ratio, interp=interp)
+        resizer = Resize(
+            target_size, keep_ratio=self.keep_ratio, interp=interp)
         return resizer(samples, context=context)


@@ -214,7 +215,7 @@ class Gt2YoloTarget(BaseOperator):
                     gi = int(gx * grid_w)
                     gj = int(gy * grid_h)

-                    # gtbox should be regresed in this layes if best match 
+                    # gtbox should be regressed in this layer if best match
                     # anchor index in anchor mask of this layer
                     if best_idx in mask:
                         best_n = mask.index(best_idx)
@@ -234,7 +235,7 @@ class Gt2YoloTarget(BaseOperator):
                         # classification
                         target[best_n, 6 + cls, gj, gi] = 1.

-                    # For non-matched anchors, calculate the target if the iou 
+                    # For non-matched anchors, calculate the target if the iou
                     # between anchor and gt is larger than iou_thresh
                     if self.iou_thresh < 1:
                         for idx, mask_i in enumerate(mask):
@@ -286,7 +287,8 @@ class Gt2FCOSTarget(BaseOperator):
         object_sizes_of_interest = []
         for i in range(len(self.object_sizes_boundary) - 1):
             object_sizes_of_interest.append([
-                self.object_sizes_boundary[i], self.object_sizes_boundary[i + 1]
+                self.object_sizes_boundary[i],
+                self.object_sizes_boundary[i + 1]
             ])
         self.object_sizes_of_interest = object_sizes_of_interest
         self.norm_reg_targets = norm_reg_targets
@@ -358,7 +360,8 @@ class Gt2FCOSTarget(BaseOperator):
         r_res = clipped_box[:, :, 2] - xs
         t_res = ys - clipped_box[:, :, 1]
         b_res = clipped_box[:, :, 3] - ys
-        clipped_box_reg_targets = np.stack([l_res, t_res, r_res, b_res], axis=2)
+        clipped_box_reg_targets = np.stack(
+            [l_res, t_res, r_res, b_res], axis=2)
         inside_gt_box = np.min(clipped_box_reg_targets, axis=2) > 0
         return inside_gt_box

@@ -439,8 +442,10 @@ class Gt2FCOSTarget(BaseOperator):
                 split_sections.append(end)
                 beg = end
             labels_by_level = np.split(labels, split_sections, axis=0)
-            reg_targets_by_level = np.split(reg_targets, split_sections, axis=0)
-            ctn_targets_by_level = np.split(ctn_targets, split_sections, axis=0)
+            reg_targets_by_level = np.split(
+                reg_targets, split_sections, axis=0)
+            ctn_targets_by_level = np.split(
+                ctn_targets, split_sections, axis=0)
             for lvl in range(len(self.downsample_ratios)):
                 grid_w = int(np.ceil(w / self.downsample_ratios[lvl]))
                 grid_h = int(np.ceil(h / self.downsample_ratios[lvl]))
@@ -472,7 +477,7 @@ class Gt2TTFTarget(BaseOperator):
     """
     """
     Gt2TTFTarget
     Gt2TTFTarget
     Generate TTFNet targets by ground truth data
     Generate TTFNet targets by ground truth data
-    
+
     Args:
         num_classes(int): the number of classes.
         down_ratio(int): the down ratio from images to heatmap, 4 by default.
@@ -523,7 +528,8 @@ class Gt2TTFTarget(BaseOperator):

             for k in range(len(gt_bbox)):
                 cls_id = gt_class[k]
-                fake_heatmap = np.zeros((feat_size, feat_size), dtype='float32')
+                fake_heatmap = np.zeros(
+                    (feat_size, feat_size), dtype='float32')
                 self.draw_truncate_gaussian(fake_heatmap, ct_inds[k],
                                             h_radiuses_alpha[k],
                                             w_radiuses_alpha[k])
@@ -606,7 +612,8 @@ class Gt2Solov2Target(BaseOperator):
             im_c, im_h, im_w = sample['image'].shape[:]
             gt_masks_raw = sample['gt_segm'].astype(np.uint8)
             mask_feat_size = [
-                int(im_h / self.sampling_ratio), int(im_w / self.sampling_ratio)
+                int(im_h / self.sampling_ratio),
+                int(im_w / self.sampling_ratio)
             ]
             gt_areas = np.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
                                (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
@@ -662,15 +669,18 @@ class Gt2Solov2Target(BaseOperator):
                     top_box = max(0,
                                   int(((center_h - half_h) / upsampled_size[0])
                                       // (1. / num_grid)))
-                    down_box = min(num_grid - 1,
-                                   int(((center_h + half_h) / upsampled_size[0])
-                                       // (1. / num_grid)))
-                    left_box = max(0,
-                                   int(((center_w - half_w) / upsampled_size[1])
-                                       // (1. / num_grid)))
+                    down_box = min(
+                        num_grid - 1,
+                        int(((center_h + half_h) / upsampled_size[0]) //
+                            (1. / num_grid)))
+                    left_box = max(
+                        0,
+                        int(((center_w - half_w) / upsampled_size[1]) //
+                            (1. / num_grid)))
                     right_box = min(num_grid - 1,
                                     int(((center_w + half_w) /
-                                         upsampled_size[1]) // (1. / num_grid)))
+                                         upsampled_size[1]) //
+                                        (1. / num_grid)))

                     top = max(top_box, coord_h - 1)
                     down = min(down_box, coord_h + 1)

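The Gt2YoloTarget cell assignment seen above boils down to flooring the normalized ground-truth center into grid coordinates; a tiny worked sketch:

    grid_w, grid_h = 13, 13
    gx, gy = 0.52, 0.31                          # normalized gt center
    gi, gj = int(gx * grid_w), int(gy * grid_h)  # cell (6, 4) owns this gt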
+ 2 - 2
paddlex/ppdet/data/transform/gridmask_utils.py

@@ -68,8 +68,8 @@ class Gridmask(object):
         mask = Image.fromarray(np.uint8(mask))
         mask = mask.rotate(r)
         mask = np.asarray(mask)
-        mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) // 2
-                    + w].astype(np.float32)
+        mask = mask[(hh - h) // 2:(hh - h) // 2 + h, (ww - w) // 2:(ww - w) //
+                    2 + w].astype(np.float32)

         if self.mode == 1:
             mask = 1 - mask

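The reflowed Gridmask slice above center-crops the oversized rotated mask back to the image size; the same indexing in a numpy sketch:

    import numpy as np

    hh, ww, h, w = 10, 10, 6, 6
    mask = np.ones((hh, ww))
    crop = mask[(hh - h) // 2:(hh - h) // 2 + h,
                (ww - w) // 2:(ww - w) // 2 + w]
    assert crop.shape == (h, w)  # central h x w window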
+ 2 - 1
paddlex/ppdet/data/transform/keypoint_operators.py

@@ -334,7 +334,8 @@ class TagGenerate(object):
     def __call__(self, records):
         kpts_lst = records['joints']
         kpts = kpts_lst[0]
-        tagmap = np.zeros((self.max_people, self.num_joints, 4), dtype=np.int64)
+        tagmap = np.zeros(
+            (self.max_people, self.num_joints, 4), dtype=np.int64)
         inds = np.where(kpts[..., 2] > 0)
         p, j = inds[0], inds[1]
         visible = kpts[inds]

+ 9 - 8
paddlex/ppdet/data/transform/mot_operators.py

@@ -130,7 +130,7 @@ class LetterBoxResize(BaseOperator):

 @register_op
 class MOTRandomAffine(BaseOperator):
-    """ 
+    """
     Affine transform to image and coords to achieve the rotate, scale and
     shift effect for training image.

@@ -225,7 +225,8 @@ class MOTRandomAffine(BaseOperator):

             # apply angle-based reduction
             radians = a * math.pi / 180
-            reduction = max(abs(math.sin(radians)), abs(math.cos(radians)))**0.5
+            reduction = max(abs(math.sin(radians)),
+                            abs(math.cos(radians)))**0.5
             x = (xy[:, 2] + xy[:, 0]) / 2
             y = (xy[:, 3] + xy[:, 1]) / 2
             w = (xy[:, 2] - xy[:, 0]) * reduction
@@ -269,7 +270,7 @@ class Gt2JDETargetThres(BaseOperator):
         anchors (list): anchors of JDE model
         anchor_masks (list): anchor_masks of JDE model
         downsample_ratios (list): downsample ratios of JDE model
-        ide_thresh (float): thresh of identity, higher is groud truth 
+        ide_thresh (float): thresh of identity, higher is ground truth
         fg_thresh (float): thresh of foreground, higher is foreground
         bg_thresh (float): thresh of background, lower is background
         num_classes (int): number of classes
@@ -511,8 +512,8 @@ class Gt2JDETargetMax(BaseOperator):
                     # XY coordinates
                     tbox[:, :, :, 0:2][a_i, gj_i, gi_i] = gxy - gxy.astype(int)
                     # Width and height in yolo method
-                    tbox[:, :, :, 2:4][a_i, gj_i, gi_i] = np.log(gwh /
-                                                                 anchor_hw[a_i])
+                    tbox[:, :, :, 2:4][a_i, gj_i, gi_i] = np.log(
+                        gwh / anchor_hw[a_i])
                     tconf[a_i, gj_i, gi_i] = 1
                     tid[a_i, gj_i, gi_i] = t_id

@@ -528,7 +529,7 @@ class Gt2FairMOTTarget(Gt2TTFTarget):
     Difference between Gt2FairMOTTarget and Gt2TTFTarget are:
         1. the gaussian kernal radius to generate a heatmap.
         2. the targets needed during traing.
-    
+
     Args:
         num_classes(int): the number of classes.
         down_ratio(int): the down ratio from images to heatmap, 4 by default.
@@ -586,8 +587,8 @@ class Gt2FairMOTTarget(Gt2TTFTarget):
                     radius = max(0, int(radius))
                     ct = np.array([bbox[0], bbox[1]], dtype=np.float32)
                     ct_int = ct.astype(np.int32)
-                    self.draw_truncate_gaussian(heatmap[cls_id], ct_int, radius,
-                                                radius)
+                    self.draw_truncate_gaussian(heatmap[cls_id], ct_int,
+                                                radius, radius)
                     bbox_size[k] = ct[0] - bbox_amodal[0], ct[1] - bbox_amodal[1], \
                             bbox_amodal[2] - ct[0], bbox_amodal[3] - ct[1]


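The rewrapped `reduction` expression in MOTRandomAffine keeps its meaning: after rotating by angle a, box width and height are scaled by sqrt(max(|sin a|, |cos a|)) so the re-fitted axis-aligned box does not inflate. A quick numeric check (standalone, values illustrative):

    import math

    for a in (0, 15, 45):                # rotation angle in degrees
        radians = a * math.pi / 180
        reduction = max(abs(math.sin(radians)), abs(math.cos(radians)))**0.5
        print(a, round(reduction, 4))    # 0 -> 1.0, 45 -> ~0.8409
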
+ 6 - 2
paddlex/ppdet/data/transform/op_helper.py

@@ -61,7 +61,10 @@ def is_overlap(object_bbox, sample_bbox):
         return True


-def filter_and_process(sample_bbox, bboxes, labels, scores=None,
+def filter_and_process(sample_bbox,
+                       bboxes,
+                       labels,
+                       scores=None,
                        keypoints=None):
     new_bboxes = []
     new_labels = []
@@ -233,7 +236,8 @@ def data_anchor_sampling(bbox_labels, image_width, image_height, scale_array,

         else:
             w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
-            h_off_orig = np.random.uniform(image_height - sample_bbox_size, 0.0)
+            h_off_orig = np.random.uniform(image_height - sample_bbox_size,
+                                           0.0)

         w_off_orig = math.floor(w_off_orig)
         h_off_orig = math.floor(h_off_orig)

+ 29 - 21
paddlex/ppdet/data/transform/operators.py

@@ -41,11 +41,11 @@ from paddlex.ppdet.core.workspace import serializable
 from paddlex.ppdet.modeling.layers import AnchorGrid
 from paddlex.ppdet.modeling import bbox_utils

-from .op_helper import (satisfy_sample_constraint, filter_and_process,
-                        generate_sample_bbox, clip_bbox, data_anchor_sampling,
-                        satisfy_sample_constraint_coverage, crop_image_sampling,
-                        generate_sample_bbox_square, bbox_area_sampling,
-                        is_poly, gaussian_radius, draw_gaussian)
+from .op_helper import (
+    satisfy_sample_constraint, filter_and_process, generate_sample_bbox,
+    clip_bbox, data_anchor_sampling, satisfy_sample_constraint_coverage,
+    crop_image_sampling, generate_sample_bbox_square, bbox_area_sampling,
+    is_poly, gaussian_radius, draw_gaussian)

 from paddlex.ppdet.utils.logger import setup_logger
 logger = setup_logger(__name__)
@@ -58,7 +58,8 @@ def register_op(cls):
     if not hasattr(BaseOperator, cls.__name__):
         setattr(BaseOperator, cls.__name__, cls)
     else:
-        raise KeyError("The {} class has been registered.".format(cls.__name__))
+        raise KeyError("The {} class has been registered.".format(
+            cls.__name__))
     return serializable(cls)


@@ -238,7 +239,9 @@ class RandomErasingImage(BaseOperator):

 @register_op
 class NormalizeImage(BaseOperator):
-    def __init__(self, mean=[0.485, 0.456, 0.406], std=[1, 1, 1],
+    def __init__(self,
+                 mean=[0.485, 0.456, 0.406],
+                 std=[1, 1, 1],
                  is_scale=True):
         """
         Args:
@@ -322,7 +325,8 @@ class GridMask(BaseOperator):
             upper_iter=upper_iter)

     def apply(self, sample, context=None):
-        sample['image'] = self.gridmask_op(sample['image'], sample['curr_iter'])
+        sample['image'] = self.gridmask_op(sample['image'],
+                                           sample['curr_iter'])
         return sample


@@ -514,7 +518,8 @@ class RandomFlip(BaseOperator):
         for segm in segms:
             if is_poly(segm):
                 # Polygon format
-                flipped_segms.append([_flip_poly(poly, width) for poly in segm])
+                flipped_segms.append(
+                    [_flip_poly(poly, width) for poly in segm])
             else:
                 # RLE format
                 import pycocotools.mask as mask_util
@@ -582,8 +587,8 @@ class RandomFlip(BaseOperator):
                 sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]

             if 'gt_rbox2poly' in sample and sample['gt_rbox2poly'].any():
-                sample['gt_rbox2poly'] = self.apply_rbox(sample['gt_rbox2poly'],
-                                                         width)
+                sample['gt_rbox2poly'] = self.apply_rbox(
+                    sample['gt_rbox2poly'], width)

             sample['flipped'] = True
             sample['image'] = im
@@ -594,7 +599,7 @@ class RandomFlip(BaseOperator):
 class Resize(BaseOperator):
     def __init__(self, target_size, keep_ratio, interp=cv2.INTER_LINEAR):
         """
-        Resize image to target size. if keep_ratio is True, 
+        Resize image to target size. if keep_ratio is True,
         resize the image's long side to the maximum of target_size
         if keep_ratio is False, resize the image to target size(h, w)
         Args:
@@ -734,8 +739,8 @@ class Resize(BaseOperator):

         # apply polygon
         if 'gt_poly' in sample and len(sample['gt_poly']) > 0:
-            sample['gt_poly'] = self.apply_segm(sample['gt_poly'], im_shape[:2],
-                                                [im_scale_x, im_scale_y])
+            sample['gt_poly'] = self.apply_segm(
+                sample['gt_poly'], im_shape[:2], [im_scale_x, im_scale_y])

         # apply semantic
         if 'semantic' in sample and sample['semantic']:
@@ -1241,14 +1246,15 @@ class RandomCrop(BaseOperator):
                             if not isinstance(part, Polygon):
                                 continue
                             part = np.squeeze(
-                                np.array(part.exterior.coords[:-1]).reshape(1,
-                                                                            -1))
+                                np.array(part.exterior.coords[:-1]).reshape(
+                                    1, -1))
                             part[0::2] -= xmin
                             part[1::2] -= ymin
                             crop_segm.append(part.tolist())
                     elif isinstance(inter, Polygon):
                         crop_poly = np.squeeze(
-                            np.array(inter.exterior.coords[:-1]).reshape(1, -1))
+                            np.array(inter.exterior.coords[:-1]).reshape(1,
+                                                                         -1))
                         crop_poly[0::2] -= xmin
                         crop_poly[1::2] -= ymin
                         crop_segm.append(crop_poly.tolist())
@@ -1485,7 +1491,7 @@ class RandomScaledCrop(BaseOperator):
 @register_op
 class Cutmix(BaseOperator):
     def __init__(self, alpha=1.5, beta=1.5):
-        """ 
+        """
         CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899
         Cutmix image and gt_bbbox/gt_score
         Args:
@@ -1808,7 +1814,9 @@ class DebugVisibleImage(BaseOperator):
                     x1 = round(keypoint[2 * j]).astype(np.int32)
                     y1 = round(keypoint[2 * j + 1]).astype(np.int32)
                     draw.ellipse(
-                        (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')
+                        (x1, y1, x1 + 5, y1 + 5),
+                        fill='green',
+                        outline='green')
         save_path = os.path.join(self.output_dir, out_file_name)
         image.save(save_path, quality=95)
         return sample
@@ -2015,10 +2023,10 @@ class Rbox2Poly(BaseOperator):
 @register_op
 class AugmentHSV(BaseOperator):
     def __init__(self, fraction=0.50, is_bgr=False):
-        """ 
+        """
         Augment the SV channel of image data.
         Args:
-            fraction (float): the fraction for augment 
+            fraction (float): the fraction for augment
             is_bgr (bool): whether the image is BGR mode
         """
         super(AugmentHSV, self).__init__()

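Nearly all of the operators.py churn above is yapf reflow. The NormalizeImage defaults visible in the hunk are worth a worked note: with is_scale=True the operator conventionally maps uint8 pixels to (im / 255 - mean) / std per channel. A hedged sketch of that arithmetic (the apply body itself is not part of this diff):

    import numpy as np

    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)  # defaults from the hunk
    std = np.array([1, 1, 1], dtype=np.float32)

    im = np.random.randint(0, 256, (32, 32, 3)).astype(np.float32)
    im /= 255.0   # is_scale=True
    im -= mean    # broadcast over the HWC channel axis
    im /= std
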
+ 12 - 12
paddlex/ppdet/engine/__init__.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from . import trainer

+ 20 - 17
paddlex/ppdet/engine/callbacks.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -97,7 +97,8 @@ class LogPrinter(Callback):
                 logs = training_staus.log()
                 space_fmt = ':' + str(len(str(steps_per_epoch))) + 'd'
                 if step_id % self.model.cfg.log_iter == 0:
-                    eta_steps = (epoches - epoch_id) * steps_per_epoch - step_id
+                    eta_steps = (epoches - epoch_id
+                                 ) * steps_per_epoch - step_id
                     eta_sec = eta_steps * batch_time.global_avg
                     eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                     ips = float(batch_size) / batch_time.avg
@@ -162,7 +163,8 @@ class Checkpointer(Callback):
                         epoch_id + 1
                 ) % self.model.cfg.snapshot_epoch == 0 or epoch_id == end_epoch - 1:
                     save_name = str(
-                        epoch_id) if epoch_id != end_epoch - 1 else "model_final"
+                        epoch_id
+                    ) if epoch_id != end_epoch - 1 else "model_final"
                     weight = self.weight
             elif mode == 'eval':
                 if 'save_best_model' in status and status['save_best_model']:
@@ -217,7 +219,8 @@ class VisualDLWriter(Callback):
             logger.error('visualdl not found, plaese install visualdl. '
                          'for example: `pip install visualdl`.')
             raise e
-        self.vdl_writer = LogWriter(model.cfg.get('vdl_log_dir', 'vdl_log_dir/scalar'))
+        self.vdl_writer = LogWriter(
+            model.cfg.get('vdl_log_dir', 'vdl_log_dir/scalar'))
         self.vdl_loss_step = 0
         self.vdl_mAP_step = 0
         self.vdl_image_step = 0
@@ -236,8 +239,8 @@ class VisualDLWriter(Callback):
                 ori_image = status['original_image']
                 result_image = status['result_image']
                 self.vdl_writer.add_image(
-                    "original/frame_{}".format(self.vdl_image_frame), ori_image,
-                    self.vdl_image_step)
+                    "original/frame_{}".format(self.vdl_image_frame),
+                    ori_image, self.vdl_image_step)
                 self.vdl_writer.add_image(
                     "result/frame_{}".format(self.vdl_image_frame),
                     result_image, self.vdl_image_step)

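The eta_steps rewrap in LogPrinter preserves behavior: the ETA is the remaining step count times the running average step time. A worked example (numbers invented):

    import datetime

    epoches, epoch_id = 300, 12            # names as in the hunk
    steps_per_epoch, step_id = 500, 120
    batch_time_global_avg = 0.35           # seconds per step

    eta_steps = (epoches - epoch_id) * steps_per_epoch - step_id
    eta_sec = eta_steps * batch_time_global_avg
    print(str(datetime.timedelta(seconds=int(eta_sec))))  # '13:59:18'
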
+ 12 - 12
paddlex/ppdet/engine/env.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 14 - 13
paddlex/ppdet/engine/export_utils.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -114,4 +114,5 @@ def _dump_infer_config(config, path, image_shape, model):
             infer_cfg['label_list'].insert(0, 'background')

     yaml.dump(infer_cfg, open(path, 'w'))
-    logger.info("Export inference config file to {}".format(os.path.join(path)))
+    logger.info("Export inference config file to {}".format(
+        os.path.join(path)))

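Worth noting: os.path.join(path) with a single argument is a no-op, so the rewrapped log line simply prints path. For reference, writing a config dict the way the hunk does (keys invented for illustration):

    import yaml

    infer_cfg = {'arch': 'YOLO', 'label_list': ['background', 'person']}
    with open('infer_cfg.yml', 'w') as f:
        yaml.dump(infer_cfg, f)
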
+ 12 - 12
paddlex/ppdet/metrics/coco_utils.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 2 - 2
paddlex/ppdet/metrics/json_results.py

@@ -143,8 +143,8 @@ def get_keypoint_res(results, im_id):
             }
             x = kpt[0::3]
             y = kpt[1::3]
-            x0, x1, y0, y1 = np.min(x).item(), np.max(x).item(), np.min(y).item(
-            ), np.max(y).item()
+            x0, x1, y0, y1 = np.min(x).item(), np.max(x).item(), np.min(
+                y).item(), np.max(y).item()
             ann['area'] = (x1 - x0) * (y1 - y0)
             ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
             anns.append(ann)

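The rewrapped min/max chain derives a COCO-style bbox from a flat keypoint list [x1, y1, v1, x2, y2, v2, ...]; a standalone sketch:

    import numpy as np

    kpt = np.array([10, 20, 2, 40, 25, 2, 30, 60, 1], dtype=np.float32)
    x, y = kpt[0::3], kpt[1::3]                   # every third value is x, then y
    x0, x1 = np.min(x).item(), np.max(x).item()
    y0, y1 = np.min(y).item(), np.max(y).item()
    area = (x1 - x0) * (y1 - y0)                  # 30 * 40 = 1200.0
    bbox = [x0, y0, x1 - x0, y1 - y0]             # COCO xywh
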
+ 16 - 16
paddlex/ppdet/metrics/keypoint_metrics.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import copy
@@ -61,10 +61,10 @@ class KeyPointTopDownCOCOEval(object):
         num_images = inputs['image'].shape[0]
         self.results['all_preds'][self.idx:self.idx + num_images, :, 0:
                                   3] = kpts[:, :, 0:3]
-        self.results['all_boxes'][self.idx:self.idx + num_images, 0:2] = inputs[
-            'center'].numpy()[:, 0:2]
-        self.results['all_boxes'][self.idx:self.idx + num_images, 2:4] = inputs[
-            'scale'].numpy()[:, 0:2]
+        self.results['all_boxes'][self.idx:self.idx + num_images, 0:
+                                  2] = inputs['center'].numpy()[:, 0:2]
+        self.results['all_boxes'][self.idx:self.idx + num_images, 2:
+                                  4] = inputs['scale'].numpy()[:, 0:2]
         self.results['all_boxes'][self.idx:self.idx + num_images, 4] = np.prod(
             inputs['scale'].numpy() * 200, 1)
         self.results['all_boxes'][self.idx:self.idx + num_images,

+ 7 - 6
paddlex/ppdet/metrics/map_utils.py

@@ -108,8 +108,8 @@ class DetectionMAP(object):
     Args:
         class_num (int): The class number.
         overlap_thresh (float): The threshold of overlap
-            ratio between prediction bounding box and 
-            ground truth bounding box for deciding 
+            ratio between prediction bounding box and
+            ground truth bounding box for deciding
             true/false positive. Default 0.5.
         map_type (str): Calculation method of mean average
             precision, currently support '11point' and
@@ -283,8 +283,9 @@ class DetectionMAP(object):
             num_columns = min(6, len(results_per_category) * 2)
             results_flatten = list(itertools.chain(*results_per_category))
             headers = ['category', 'AP'] * (num_columns // 2)
-            results_2d = itertools.zip_longest(
-                *[results_flatten[i::num_columns] for i in range(num_columns)])
+            results_2d = itertools.zip_longest(* [
+                results_flatten[i::num_columns] for i in range(num_columns)
+            ])
             table_data = [headers]
             table_data += [result for result in results_2d]
             table = AsciiTable(table_data)
@@ -315,7 +316,7 @@ def ap_per_class(tp, conf, pred_cls, target_cls):
     """
     """
     Computes the average precision, given the recall and precision curves.
     Computes the average precision, given the recall and precision curves.
     Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
     Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
-    
+
     Args:
     Args:
         tp (list): True positives.
         tp (list): True positives.
         conf (list): Objectness value from 0-1.
         conf (list): Objectness value from 0-1.
@@ -369,7 +370,7 @@ def compute_ap(recall, precision):
     """
     """
     Computes the average precision, given the recall and precision curves.
     Computes the average precision, given the recall and precision curves.
     Code originally from https://github.com/rbgirshick/py-faster-rcnn.
     Code originally from https://github.com/rbgirshick/py-faster-rcnn.
-    
+
     Args:
     Args:
         recall (list): The recall curve.
         recall (list): The recall curve.
         precision (list): The precision curve.
         precision (list): The precision curve.

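Both docstrings touched above describe the standard AP computation: build a monotonically non-increasing precision envelope, then integrate it over recall. A sketch of the well-known py-faster-rcnn reference version (this diff only touches whitespace, so the body below is the textbook algorithm, not a quote of this file):

    import numpy as np

    def compute_ap(recall, precision):
        # Pad both curves, enforce a non-increasing precision envelope,
        # then sum precision over each recall step.
        mrec = np.concatenate(([0.], recall, [1.]))
        mpre = np.concatenate(([0.], precision, [0.]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])

    print(compute_ap([0.5, 1.0], [1.0, 0.5]))  # 0.75
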
+ 16 - 16
paddlex/ppdet/metrics/metrics.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -35,8 +35,8 @@ __all__ = [
 ]

 COCO_SIGMAS = np.array([
-    .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87,
-    .89, .89
+    .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
+    .87, .89, .89
 ]) / 10.0
 CROWD_SIGMAS = np.array(
     [.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79,
@@ -275,8 +275,8 @@ class VOCMetric(Metric):

     def log(self):
         map_stat = 100. * self.detection_map.get_map()
-        logger.info("mAP({:.2f}, {}) = {:.2f}%".format(self.overlap_thresh,
-                                                       self.map_type, map_stat))
+        logger.info("mAP({:.2f}, {}) = {:.2f}%".format(
+            self.overlap_thresh, self.map_type, map_stat))

     def get_results(self):
         return {'bbox': [self.detection_map.get_map()]}

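The resplit COCO_SIGMAS literal is unchanged; these are the per-keypoint falloff constants used by object keypoint similarity (OKS), where each keypoint contributes roughly exp(-d^2 / (2 * s * k^2)) with k = 2*sigma and s the object area. A hedged sketch of that formula (not code from this file):

    import numpy as np

    COCO_SIGMAS = np.array([
        .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07,
        .87, .87, .89, .89
    ]) / 10.0

    def oks(pred, gt, area):
        # pred, gt: (17, 2) keypoint coordinates; area: object segment area.
        d2 = np.sum((pred - gt)**2, axis=1)
        k = 2 * COCO_SIGMAS
        e = d2 / (2 * area * k**2 + np.spacing(1))
        return np.mean(np.exp(-e))

    gt = np.random.rand(17, 2) * 100
    print(oks(gt + 1.0, gt, area=2500.0))  # close to 1.0 for a 1 px offset
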
+ 2 - 1
paddlex/ppdet/metrics/mot_eval_utils.py

@@ -144,7 +144,8 @@ class MOTEvaluator(object):
             trk_ids = trk_ids[keep]

         # get distance matrix
-        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
+        iou_distance = mm.distances.iou_matrix(
+            gt_tlwhs, trk_tlwhs, max_iou=0.5)

         # acc
         self.acc.update(gt_ids, trk_ids, iou_distance)

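Here mm is the motmetrics package: iou_matrix builds the GT-to-hypothesis cost matrix (1 - IoU, with entries above max_iou treated as no-match) that MOTAccumulator.update consumes. Minimal usage with invented tlwh boxes:

    import motmetrics as mm
    import numpy as np

    gt_tlwhs = np.array([[10., 10., 20., 40.]])    # one GT box (x, y, w, h)
    trk_tlwhs = np.array([[12., 11., 20., 40.]])   # one tracker box

    iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
    acc = mm.MOTAccumulator(auto_id=True)
    acc.update([1], [1], iou_distance)             # gt ids, track ids, costs
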
+ 12 - 12
paddlex/ppdet/metrics/mot_metrics.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.
 from __future__ import absolute_import
 from __future__ import division

+ 8 - 6
paddlex/ppdet/metrics/widerface_utils.py

@@ -130,8 +130,8 @@ def multi_scale_test(model, image, max_shrink):
     st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
     det_s = detect_face(model, image, st)
     index = np.where(
-        np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
-        > 30)[0]
+        np.maximum(det_s[:, 2] - det_s[:, 0] + 1,
+                   det_s[:, 3] - det_s[:, 1] + 1) > 30)[0]
     det_s = det_s[index, :]
     # Enlarge one times
     bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
@@ -164,8 +164,8 @@ def multi_scale_test_pyramid(model, image, max_shrink):
     # Use image pyramids to detect faces
     det_b = detect_face(model, image, 0.25)
     index = np.where(
-        np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
-        > 30)[0]
+        np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
+                   det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
     det_b = det_b[index, :]

     st = [0.75, 1.25, 1.5, 1.75]
@@ -371,9 +371,11 @@ def lmk2out(results, is_bbox_normalized=False):
                 prior_h_center = (me_prior[3] + me_prior[1]) / 2
                 lmk_decode = np.zeros((10))
                 for j in [0, 2, 4, 6, 8]:
-                    lmk_decode[j] = lmk_pred[j] * 0.1 * prior_w + prior_w_center
+                    lmk_decode[j] = lmk_pred[
+                        j] * 0.1 * prior_w + prior_w_center
                 for j in [1, 3, 5, 7, 9]:
-                    lmk_decode[j] = lmk_pred[j] * 0.1 * prior_h + prior_h_center
+                    lmk_decode[j] = lmk_pred[
+                        j] * 0.1 * prior_h + prior_h_center
                 im_shape = t['im_shape'][0][a].tolist()
                 image_h, image_w = int(im_shape[0]), int(im_shape[1])
                 if is_bbox_normalized:

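The rewrapped landmark decode is the usual SSD-style inverse transform: each predicted offset is scaled by 0.1 times the prior box extent and added to the prior center. A standalone sketch (the prior_w/prior_h derivations are reconstructed, as they are not shown in the hunk):

    import numpy as np

    me_prior = np.array([0.2, 0.3, 0.4, 0.5])   # prior box (x0, y0, x1, y1)
    lmk_pred = np.random.randn(10) * 0.5        # five (x, y) landmark offsets

    prior_w = me_prior[2] - me_prior[0]
    prior_h = me_prior[3] - me_prior[1]
    prior_w_center = (me_prior[2] + me_prior[0]) / 2
    prior_h_center = (me_prior[3] + me_prior[1]) / 2

    lmk_decode = np.zeros((10))
    for j in [0, 2, 4, 6, 8]:
        lmk_decode[j] = lmk_pred[j] * 0.1 * prior_w + prior_w_center
    for j in [1, 3, 5, 7, 9]:
        lmk_decode[j] = lmk_pred[j] * 0.1 * prior_h + prior_h_center
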
+ 12 - 12
paddlex/ppdet/model_zoo/__init__.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from . import model_zoo

+ 12 - 12
paddlex/ppdet/model_zoo/model_zoo.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import os

+ 0 - 1
paddlex/ppdet/model_zoo/tests/test_get_model.py

@@ -21,7 +21,6 @@ import paddle
 import paddlex.ppdet as ppdet
 import unittest

-
 # NOTE: weights downloading costs time, we choose
 #       a small model for unittesting
 MODEL_NAME = 'ppyolo/ppyolo_tiny_650e_coco'

+ 12 - 12
paddlex/ppdet/modeling/__init__.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 import warnings

+ 7 - 7
paddlex/ppdet/modeling/architectures/__init__.py

@@ -1,10 +1,10 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
 from . import meta_arch
 from . import faster_rcnn
 from . import mask_rcnn

+ 14 - 13
paddlex/ppdet/modeling/architectures/cascade_rcnn.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -96,7 +96,8 @@ class CascadeRCNN(BaseArch):
             bbox_targets = self.bbox_head.get_assigned_targets()
             if self.with_mask:
                 mask_loss = self.mask_head(body_feats, rois, rois_num,
-                                           self.inputs, bbox_targets, bbox_feat)
+                                           self.inputs, bbox_targets,
+                                           bbox_feat)
                 return rpn_loss, bbox_loss, mask_loss
             else:
                 return rpn_loss, bbox_loss, {}

+ 12 - 12
paddlex/ppdet/modeling/architectures/centernet.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 12 - 12
paddlex/ppdet/modeling/architectures/deepsort.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-# 
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 12 - 12
paddlex/ppdet/modeling/architectures/fairmot.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 12 - 12
paddlex/ppdet/modeling/architectures/faster_rcnn.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 12 - 12
paddlex/ppdet/modeling/architectures/fcos.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 15 - 14
paddlex/ppdet/modeling/architectures/jde.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-# 
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -114,8 +114,9 @@ class JDE(BaseArch):
                 return online_targets

             else:
-                raise ValueError("Unknown metric {} for multi object tracking.".
-                                 format(self.metric))
+                raise ValueError(
+                    "Unknown metric {} for multi object tracking.".format(
+                        self.metric))

     def get_loss(self):
         return self._forward()

+ 14 - 14
paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import
@@ -185,8 +185,8 @@ class HrHRNetPostProcess(object):
         inds_np = inds_k[0].cpu().detach().numpy()
         y = inds_np // W
         x = inds_np % W
-        tags = tagmap[np.arange(J)[None, :].repeat(self.max_num_people),
-                      y.flatten(), x.flatten()].reshape(J, -1, tagmap.shape[-1])
+        tags = tagmap[np.arange(J)[None, :].repeat(self.max_num_people), y.
+                      flatten(), x.flatten()].reshape(J, -1, tagmap.shape[-1])
         coords = np.stack((y, x), axis=2)
         # threshold
         mask = heats > self.heat_thresh
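The reformatted indexing above is valid but easy to misread: yapf split the attribute access `y.flatten()` across two lines (`y.` / `flatten()`). The expression gathers one tag-embedding vector per (joint, peak) location. A self-contained numpy sketch with toy shapes, all sizes assumed for illustration:

    import numpy as np

    # Toy shapes (assumed): J joints, K peaks per joint, HxW map, C-dim tags.
    J, K, H, W, C = 3, 2, 4, 5, 2
    tagmap = np.random.rand(J, H, W, C)
    inds = np.random.randint(0, H * W, size=(J, K))  # top-K flat indices
    y, x = inds // W, inds % W

    # Same advanced indexing as the hunk: the joint index repeated K times
    # ([0, 0, 1, 1, 2, 2]) lines up with the row-major flatten of y and x.
    joint_idx = np.arange(J)[None, :].repeat(K)
    tags = tagmap[joint_idx, y.flatten(), x.flatten()].reshape(J, -1, C)
    assert tags.shape == (J, K, C)

The `reshape(J, -1, C)` is safe because `repeat` on the flattened `arange` produces joint-major order, matching how `y.flatten()` and `x.flatten()` enumerate the (joint, peak) pairs.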

+ 4 - 4
paddlex/ppdet/modeling/architectures/keypoint_hrnet.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at 
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and 
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 12 - 12
paddlex/ppdet/modeling/architectures/mask_rcnn.py

@@ -1,15 +1,15 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

+ 11 - 11
paddlex/ppdet/modeling/architectures/s2anet.py

@@ -1,15 +1,15 @@
 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#   
-# Licensed under the Apache License, Version 2.0 (the "License");   
-# you may not use this file except in compliance with the License.  
-# You may obtain a copy of the License at   
-#   
-#     http://www.apache.org/licenses/LICENSE-2.0    
-#   
-# Unless required by applicable law or agreed to in writing, software   
-# distributed under the License is distributed on an "AS IS" BASIS, 
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  
-# See the License for the specific language governing permissions and   
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
 # limitations under the License.

 from __future__ import absolute_import

Some files were not shown because too many files changed in this diff