Browse source

delete nets/ppcls

will-jl944 4 years ago
parent commit 8fefb59d62

+ 1 - 1
dygraph/paddlex/cv/models/classifier.py

@@ -22,9 +22,9 @@ import paddle.nn.functional as F
 from paddle.static import InputSpec
 from paddlex.utils import logging, TrainingStats
 from paddlex.cv.models.base import BaseModel
+from paddlex.cv.transforms import arrange_transforms
 from PaddleClas.ppcls.modeling import architectures
 from PaddleClas.ppcls.modeling.loss import CELoss
-from paddlex.cv.transforms import arrange_transforms
 
 __all__ = [
     "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152",

+ 0 - 13
dygraph/paddlex/cv/nets/ppcls/__init__.py

@@ -1,13 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.

+ 0 - 0
dygraph/paddlex/cv/nets/ppcls/modeling/__init__.py


+ 0 - 25
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/__init__.py

@@ -1,25 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .alexnet import AlexNet
-from .darknet import DarkNet53
-from .mobilenet_v1 import MobileNetV1
-from .mobilenet_v2 import MobileNetV2
-from .mobilenet_v3 import MobileNetV3_small, MobileNetV3_large
-from .resnet import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
-from .resnet_vd import *
-from .densenet import DenseNet121, DenseNet161, DenseNet169, DenseNet201, DenseNet264
-from .hrnet import *
-from .xception import Xception41, Xception65, Xception71
-from .shufflenet_v2 import ShuffleNetV2, ShuffleNetV2_swish

+ 0 - 146
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/alexnet.py

@@ -1,146 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, Linear, Dropout, ReLU
-from paddle.nn import MaxPool2D
-from paddle.nn.initializer import Uniform
-import math
-
-__all__ = ["AlexNet"]
-
-
-class ConvPoolLayer(nn.Layer):
-    def __init__(self,
-                 input_channels,
-                 output_channels,
-                 filter_size,
-                 stride,
-                 padding,
-                 stdv,
-                 groups=1,
-                 act=None,
-                 name=None):
-        super(ConvPoolLayer, self).__init__()
-
-        self.relu = ReLU() if act == "relu" else None
-
-        self._conv = Conv2D(
-            in_channels=input_channels,
-            out_channels=output_channels,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            weight_attr=ParamAttr(
-                name=name + "_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name=name + "_offset", initializer=Uniform(-stdv, stdv)))
-        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
-
-    def forward(self, inputs):
-        x = self._conv(inputs)
-        if self.relu is not None:
-            x = self.relu(x)
-        x = self._pool(x)
-        return x
-
-
-class AlexNetDY(nn.Layer):
-    def __init__(self, class_dim=1000):
-        super(AlexNetDY, self).__init__()
-
-        stdv = 1.0 / math.sqrt(3 * 11 * 11)
-        self._conv1 = ConvPoolLayer(
-            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
-        stdv = 1.0 / math.sqrt(64 * 5 * 5)
-        self._conv2 = ConvPoolLayer(
-            64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
-        stdv = 1.0 / math.sqrt(192 * 3 * 3)
-        self._conv3 = Conv2D(
-            192,
-            384,
-            3,
-            stride=1,
-            padding=1,
-            weight_attr=ParamAttr(
-                name="conv3_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name="conv3_offset", initializer=Uniform(-stdv, stdv)))
-        stdv = 1.0 / math.sqrt(384 * 3 * 3)
-        self._conv4 = Conv2D(
-            384,
-            256,
-            3,
-            stride=1,
-            padding=1,
-            weight_attr=ParamAttr(
-                name="conv4_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name="conv4_offset", initializer=Uniform(-stdv, stdv)))
-        stdv = 1.0 / math.sqrt(256 * 3 * 3)
-        self._conv5 = ConvPoolLayer(
-            256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
-        stdv = 1.0 / math.sqrt(256 * 6 * 6)
-
-        self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
-        self._fc6 = Linear(
-            in_features=256 * 6 * 6,
-            out_features=4096,
-            weight_attr=ParamAttr(
-                name="fc6_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name="fc6_offset", initializer=Uniform(-stdv, stdv)))
-
-        self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
-        self._fc7 = Linear(
-            in_features=4096,
-            out_features=4096,
-            weight_attr=ParamAttr(
-                name="fc7_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name="fc7_offset", initializer=Uniform(-stdv, stdv)))
-        self._fc8 = Linear(
-            in_features=4096,
-            out_features=class_dim,
-            weight_attr=ParamAttr(
-                name="fc8_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(
-                name="fc8_offset", initializer=Uniform(-stdv, stdv)))
-
-    def forward(self, inputs):
-        x = self._conv1(inputs)
-        x = self._conv2(x)
-        x = self._conv3(x)
-        x = F.relu(x)
-        x = self._conv4(x)
-        x = F.relu(x)
-        x = self._conv5(x)
-        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
-        x = self._drop1(x)
-        x = self._fc6(x)
-        x = F.relu(x)
-        x = self._drop2(x)
-        x = self._fc7(x)
-        x = F.relu(x)
-        x = self._fc8(x)
-        return x
-
-
-def AlexNet(**args):
-    model = AlexNetDY(**args)
-    return model
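For reference, a hedged usage sketch of the deleted factory; fc6 flattens 256 * 6 * 6 features, which assumes a 224x224 input (55 -> 27 -> 13 -> 13 -> 13 -> 6 through the conv/pool stack):

    import paddle

    model = AlexNet(class_dim=1000)      # factory wraps AlexNetDY
    x = paddle.randn([1, 3, 224, 224])   # 224x224 yields the 6x6 map fc6 expects
    logits = model(x)                    # shape [1, 1000]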

+ 0 - 174
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/darknet.py

@@ -1,174 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import Uniform
-import math
-
-__all__ = ["DarkNet53"]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 input_channels,
-                 output_channels,
-                 filter_size,
-                 stride,
-                 padding,
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=input_channels,
-            out_channels=output_channels,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            weight_attr=ParamAttr(name=name + ".conv.weights"),
-            bias_attr=False)
-
-        bn_name = name + ".bn"
-        self._bn = BatchNorm(
-            num_channels=output_channels,
-            act="relu",
-            param_attr=ParamAttr(name=bn_name + ".scale"),
-            bias_attr=ParamAttr(name=bn_name + ".offset"),
-            moving_mean_name=bn_name + ".mean",
-            moving_variance_name=bn_name + ".var")
-
-    def forward(self, inputs):
-        x = self._conv(inputs)
-        x = self._bn(x)
-        return x
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self, input_channels, output_channels, name=None):
-        super(BasicBlock, self).__init__()
-
-        self._conv1 = ConvBNLayer(
-            input_channels, output_channels, 1, 1, 0, name=name + ".0")
-        self._conv2 = ConvBNLayer(
-            output_channels, output_channels * 2, 3, 1, 1, name=name + ".1")
-
-    def forward(self, inputs):
-        x = self._conv1(inputs)
-        x = self._conv2(x)
-        return paddle.add(x=inputs, y=x)
-
-
-class DarkNet(nn.Layer):
-    def __init__(self, class_dim=1000):
-        super(DarkNet, self).__init__()
-
-        self.stages = [1, 2, 8, 8, 4]
-        self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input")
-        self._conv2 = ConvBNLayer(
-            32, 64, 3, 2, 1, name="yolo_input.downsample")
-
-        self._basic_block_01 = BasicBlock(64, 32, name="stage.0.0")
-        self._downsample_0 = ConvBNLayer(
-            64, 128, 3, 2, 1, name="stage.0.downsample")
-
-        self._basic_block_11 = BasicBlock(128, 64, name="stage.1.0")
-        self._basic_block_12 = BasicBlock(128, 64, name="stage.1.1")
-        self._downsample_1 = ConvBNLayer(
-            128, 256, 3, 2, 1, name="stage.1.downsample")
-
-        self._basic_block_21 = BasicBlock(256, 128, name="stage.2.0")
-        self._basic_block_22 = BasicBlock(256, 128, name="stage.2.1")
-        self._basic_block_23 = BasicBlock(256, 128, name="stage.2.2")
-        self._basic_block_24 = BasicBlock(256, 128, name="stage.2.3")
-        self._basic_block_25 = BasicBlock(256, 128, name="stage.2.4")
-        self._basic_block_26 = BasicBlock(256, 128, name="stage.2.5")
-        self._basic_block_27 = BasicBlock(256, 128, name="stage.2.6")
-        self._basic_block_28 = BasicBlock(256, 128, name="stage.2.7")
-        self._downsample_2 = ConvBNLayer(
-            256, 512, 3, 2, 1, name="stage.2.downsample")
-
-        self._basic_block_31 = BasicBlock(512, 256, name="stage.3.0")
-        self._basic_block_32 = BasicBlock(512, 256, name="stage.3.1")
-        self._basic_block_33 = BasicBlock(512, 256, name="stage.3.2")
-        self._basic_block_34 = BasicBlock(512, 256, name="stage.3.3")
-        self._basic_block_35 = BasicBlock(512, 256, name="stage.3.4")
-        self._basic_block_36 = BasicBlock(512, 256, name="stage.3.5")
-        self._basic_block_37 = BasicBlock(512, 256, name="stage.3.6")
-        self._basic_block_38 = BasicBlock(512, 256, name="stage.3.7")
-        self._downsample_3 = ConvBNLayer(
-            512, 1024, 3, 2, 1, name="stage.3.downsample")
-
-        self._basic_block_41 = BasicBlock(1024, 512, name="stage.4.0")
-        self._basic_block_42 = BasicBlock(1024, 512, name="stage.4.1")
-        self._basic_block_43 = BasicBlock(1024, 512, name="stage.4.2")
-        self._basic_block_44 = BasicBlock(1024, 512, name="stage.4.3")
-
-        self._pool = AdaptiveAvgPool2D(1)
-
-        stdv = 1.0 / math.sqrt(1024.0)
-        self._out = Linear(
-            1024,
-            class_dim,
-            weight_attr=ParamAttr(
-                name="fc_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="fc_offset"))
-
-    def forward(self, inputs):
-        x = self._conv1(inputs)
-        x = self._conv2(x)
-
-        x = self._basic_block_01(x)
-        x = self._downsample_0(x)
-
-        x = self._basic_block_11(x)
-        x = self._basic_block_12(x)
-        x = self._downsample_1(x)
-
-        x = self._basic_block_21(x)
-        x = self._basic_block_22(x)
-        x = self._basic_block_23(x)
-        x = self._basic_block_24(x)
-        x = self._basic_block_25(x)
-        x = self._basic_block_26(x)
-        x = self._basic_block_27(x)
-        x = self._basic_block_28(x)
-        x = self._downsample_2(x)
-
-        x = self._basic_block_31(x)
-        x = self._basic_block_32(x)
-        x = self._basic_block_33(x)
-        x = self._basic_block_34(x)
-        x = self._basic_block_35(x)
-        x = self._basic_block_36(x)
-        x = self._basic_block_37(x)
-        x = self._basic_block_38(x)
-        x = self._downsample_3(x)
-
-        x = self._basic_block_41(x)
-        x = self._basic_block_42(x)
-        x = self._basic_block_43(x)
-        x = self._basic_block_44(x)
-
-        x = self._pool(x)
-        x = paddle.squeeze(x, axis=[2, 3])
-        x = self._out(x)
-        return x
-
-
-def DarkNet53(**args):
-    model = DarkNet(**args)
-    return model
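A hedged usage sketch of the deleted factory: conv2 plus the four downsample layers give five stride-2 reductions, and AdaptiveAvgPool2D(1) makes the head input-size agnostic:

    import paddle

    model = DarkNet53(class_dim=1000)
    logits = model(paddle.randn([1, 3, 256, 256]))  # 256x256 input is an assumption
    print(logits.shape)                             # [1, 1000]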

+ 0 - 307
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/densenet.py

@@ -1,307 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import Uniform
-
-import math
-
-__all__ = [
-    "DenseNet121", "DenseNet161", "DenseNet169", "DenseNet201", "DenseNet264"
-]
-
-
-class BNACConvLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 pad=0,
-                 groups=1,
-                 act="relu",
-                 name=None):
-        super(BNACConvLayer, self).__init__()
-
-        self._batch_norm = BatchNorm(
-            num_channels,
-            act=act,
-            param_attr=ParamAttr(name=name + '_bn_scale'),
-            bias_attr=ParamAttr(name + '_bn_offset'),
-            moving_mean_name=name + '_bn_mean',
-            moving_variance_name=name + '_bn_variance')
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=pad,
-            groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-
-    def forward(self, input):
-        y = self._batch_norm(input)
-        y = self._conv(y)
-        return y
-
-
-class DenseLayer(nn.Layer):
-    def __init__(self, num_channels, growth_rate, bn_size, dropout, name=None):
-        super(DenseLayer, self).__init__()
-        self.dropout = dropout
-
-        self.bn_ac_func1 = BNACConvLayer(
-            num_channels=num_channels,
-            num_filters=bn_size * growth_rate,
-            filter_size=1,
-            pad=0,
-            stride=1,
-            name=name + "_x1")
-
-        self.bn_ac_func2 = BNACConvLayer(
-            num_channels=bn_size * growth_rate,
-            num_filters=growth_rate,
-            filter_size=3,
-            pad=1,
-            stride=1,
-            name=name + "_x2")
-
-        if dropout:
-            self.dropout_func = Dropout(p=dropout, mode="downscale_in_infer")
-
-    def forward(self, input):
-        conv = self.bn_ac_func1(input)
-        conv = self.bn_ac_func2(conv)
-        if self.dropout:
-            conv = self.dropout_func(conv)
-        conv = paddle.concat([input, conv], axis=1)
-        return conv
-
-
-class DenseBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_layers,
-                 bn_size,
-                 growth_rate,
-                 dropout,
-                 name=None):
-        super(DenseBlock, self).__init__()
-        self.dropout = dropout
-
-        self.dense_layer_func = []
-
-        pre_channel = num_channels
-        for layer in range(num_layers):
-            self.dense_layer_func.append(
-                self.add_sublayer(
-                    "{}_{}".format(name, layer + 1),
-                    DenseLayer(
-                        num_channels=pre_channel,
-                        growth_rate=growth_rate,
-                        bn_size=bn_size,
-                        dropout=dropout,
-                        name=name + '_' + str(layer + 1))))
-            pre_channel = pre_channel + growth_rate
-
-    def forward(self, input):
-        conv = input
-        for func in self.dense_layer_func:
-            conv = func(conv)
-        return conv
-
-
-class TransitionLayer(nn.Layer):
-    def __init__(self, num_channels, num_output_features, name=None):
-        super(TransitionLayer, self).__init__()
-
-        self.conv_ac_func = BNACConvLayer(
-            num_channels=num_channels,
-            num_filters=num_output_features,
-            filter_size=1,
-            pad=0,
-            stride=1,
-            name=name)
-
-        self.pool2d_avg = AvgPool2D(kernel_size=2, stride=2, padding=0)
-
-    def forward(self, input):
-        y = self.conv_ac_func(input)
-        y = self.pool2d_avg(y)
-        return y
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 pad=0,
-                 groups=1,
-                 act="relu",
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=pad,
-            groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(name=name + '_bn_scale'),
-            bias_attr=ParamAttr(name + '_bn_offset'),
-            moving_mean_name=name + '_bn_mean',
-            moving_variance_name=name + '_bn_variance')
-
-    def forward(self, input):
-        y = self._conv(input)
-        y = self._batch_norm(y)
-        return y
-
-
-class DenseNet(nn.Layer):
-    def __init__(self, layers=60, bn_size=4, dropout=0, class_dim=1000):
-        super(DenseNet, self).__init__()
-
-        supported_layers = [121, 161, 169, 201, 264]
-        assert layers in supported_layers, \
-            "supported layers are {} but input layer is {}".format(
-                supported_layers, layers)
-        densenet_spec = {
-            121: (64, 32, [6, 12, 24, 16]),
-            161: (96, 48, [6, 12, 36, 24]),
-            169: (64, 32, [6, 12, 32, 32]),
-            201: (64, 32, [6, 12, 48, 32]),
-            264: (64, 32, [6, 12, 64, 48])
-        }
-        num_init_features, growth_rate, block_config = densenet_spec[layers]
-
-        self.conv1_func = ConvBNLayer(
-            num_channels=3,
-            num_filters=num_init_features,
-            filter_size=7,
-            stride=2,
-            pad=3,
-            act='relu',
-            name="conv1")
-
-        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
-
-        self.block_config = block_config
-
-        self.dense_block_func_list = []
-        self.transition_func_list = []
-        pre_num_channels = num_init_features
-        num_features = num_init_features
-        for i, num_layers in enumerate(block_config):
-            self.dense_block_func_list.append(
-                self.add_sublayer(
-                    "db_conv_{}".format(i + 2),
-                    DenseBlock(
-                        num_channels=pre_num_channels,
-                        num_layers=num_layers,
-                        bn_size=bn_size,
-                        growth_rate=growth_rate,
-                        dropout=dropout,
-                        name='conv' + str(i + 2))))
-
-            num_features = num_features + num_layers * growth_rate
-            pre_num_channels = num_features
-
-            if i != len(block_config) - 1:
-                self.transition_func_list.append(
-                    self.add_sublayer(
-                        "tr_conv{}_blk".format(i + 2),
-                        TransitionLayer(
-                            num_channels=pre_num_channels,
-                            num_output_features=num_features // 2,
-                            name='conv' + str(i + 2) + "_blk")))
-                pre_num_channels = num_features // 2
-                num_features = num_features // 2
-
-        self.batch_norm = BatchNorm(
-            num_features,
-            act="relu",
-            param_attr=ParamAttr(name='conv5_blk_bn_scale'),
-            bias_attr=ParamAttr(name='conv5_blk_bn_offset'),
-            moving_mean_name='conv5_blk_bn_mean',
-            moving_variance_name='conv5_blk_bn_variance')
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1)
-
-        stdv = 1.0 / math.sqrt(num_features * 1.0)
-
-        self.out = Linear(
-            num_features,
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name="fc_weights"),
-            bias_attr=ParamAttr(name="fc_offset"))
-
-    def forward(self, input):
-        conv = self.conv1_func(input)
-        conv = self.pool2d_max(conv)
-
-        for i, num_layers in enumerate(self.block_config):
-            conv = self.dense_block_func_list[i](conv)
-            if i != len(self.block_config) - 1:
-                conv = self.transition_func_list[i](conv)
-
-        conv = self.batch_norm(conv)
-        y = self.pool2d_avg(conv)
-        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
-        y = self.out(y)
-        return y
-
-
-def DenseNet121(**args):
-    model = DenseNet(layers=121, **args)
-    return model
-
-
-def DenseNet161(**args):
-    model = DenseNet(layers=161, **args)
-    return model
-
-
-def DenseNet169(**args):
-    model = DenseNet(layers=169, **args)
-    return model
-
-
-def DenseNet201(**args):
-    model = DenseNet(layers=201, **args)
-    return model
-
-
-def DenseNet264(**args):
-    model = DenseNet(layers=264, **args)
-    return model
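A hedged usage sketch of the deleted factories: each one fixes `layers`, which densenet_spec maps to (num_init_features, growth_rate, block_config):

    import paddle

    model = DenseNet121(class_dim=1000)             # spec: (64, 32, [6, 12, 24, 16])
    logits = model(paddle.randn([1, 3, 224, 224]))  # AdaptiveAvgPool2D(1) tolerates other sizes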

+ 0 - 683
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/hrnet.py

@@ -1,683 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, BatchNorm, Linear
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import Uniform
-
-import math
-
-__all__ = [
-    "HRNet_W18_C", "HRNet_W30_C", "HRNet_W32_C", "HRNet_W40_C", "HRNet_W44_C",
-    "HRNet_W48_C", "HRNet_W64_C"
-]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 groups=1,
-                 act="relu",
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-        bn_name = name + '_bn'
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(name=bn_name + '_scale'),
-            bias_attr=ParamAttr(bn_name + '_offset'),
-            moving_mean_name=bn_name + '_mean',
-            moving_variance_name=bn_name + '_variance')
-
-    def forward(self, input):
-        y = self._conv(input)
-        y = self._batch_norm(y)
-        return y
-
-
-class Layer1(nn.Layer):
-    def __init__(self, num_channels, has_se=False, name=None):
-        super(Layer1, self).__init__()
-
-        self.bottleneck_block_list = []
-
-        for i in range(4):
-            bottleneck_block = self.add_sublayer(
-                "bb_{}_{}".format(name, i + 1),
-                BottleneckBlock(
-                    num_channels=num_channels if i == 0 else 256,
-                    num_filters=64,
-                    has_se=has_se,
-                    stride=1,
-                    downsample=True if i == 0 else False,
-                    name=name + '_' + str(i + 1)))
-            self.bottleneck_block_list.append(bottleneck_block)
-
-    def forward(self, input):
-        conv = input
-        for block_func in self.bottleneck_block_list:
-            conv = block_func(conv)
-        return conv
-
-
-class TransitionLayer(nn.Layer):
-    def __init__(self, in_channels, out_channels, name=None):
-        super(TransitionLayer, self).__init__()
-
-        num_in = len(in_channels)
-        num_out = len(out_channels)
-        out = []
-        self.conv_bn_func_list = []
-        for i in range(num_out):
-            residual = None
-            if i < num_in:
-                if in_channels[i] != out_channels[i]:
-                    residual = self.add_sublayer(
-                        "transition_{}_layer_{}".format(name, i + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[i],
-                            num_filters=out_channels[i],
-                            filter_size=3,
-                            name=name + '_layer_' + str(i + 1)))
-            else:
-                residual = self.add_sublayer(
-                    "transition_{}_layer_{}".format(name, i + 1),
-                    ConvBNLayer(
-                        num_channels=in_channels[-1],
-                        num_filters=out_channels[i],
-                        filter_size=3,
-                        stride=2,
-                        name=name + '_layer_' + str(i + 1)))
-            self.conv_bn_func_list.append(residual)
-
-    def forward(self, input):
-        outs = []
-        for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
-            if conv_bn_func is None:
-                outs.append(input[idx])
-            else:
-                if idx < len(input):
-                    outs.append(conv_bn_func(input[idx]))
-                else:
-                    outs.append(conv_bn_func(input[-1]))
-        return outs
-
-
-class Branches(nn.Layer):
-    def __init__(self,
-                 block_num,
-                 in_channels,
-                 out_channels,
-                 has_se=False,
-                 name=None):
-        super(Branches, self).__init__()
-
-        self.basic_block_list = []
-
-        for i in range(len(out_channels)):
-            self.basic_block_list.append([])
-            for j in range(block_num):
-                in_ch = in_channels[i] if j == 0 else out_channels[i]
-                basic_block_func = self.add_sublayer(
-                    "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
-                    BasicBlock(
-                        num_channels=in_ch,
-                        num_filters=out_channels[i],
-                        has_se=has_se,
-                        name=name + '_branch_layer_' + str(i + 1) + '_' +
-                        str(j + 1)))
-                self.basic_block_list[i].append(basic_block_func)
-
-    def forward(self, inputs):
-        outs = []
-        for idx, input in enumerate(inputs):
-            conv = input
-            basic_block_list = self.basic_block_list[idx]
-            for basic_block_func in basic_block_list:
-                conv = basic_block_func(conv)
-            outs.append(conv)
-        return outs
-
-
-class BottleneckBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 has_se,
-                 stride=1,
-                 downsample=False,
-                 name=None):
-        super(BottleneckBlock, self).__init__()
-
-        self.has_se = has_se
-        self.downsample = downsample
-
-        self.conv1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=1,
-            act="relu",
-            name=name + "_conv1", )
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act="relu",
-            name=name + "_conv2")
-        self.conv3 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters * 4,
-            filter_size=1,
-            act=None,
-            name=name + "_conv3")
-
-        if self.downsample:
-            self.conv_down = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                act=None,
-                name=name + "_downsample")
-
-        if self.has_se:
-            self.se = SELayer(
-                num_channels=num_filters * 4,
-                num_filters=num_filters * 4,
-                reduction_ratio=16,
-                name='fc' + name)
-
-    def forward(self, input):
-        residual = input
-        conv1 = self.conv1(input)
-        conv2 = self.conv2(conv1)
-        conv3 = self.conv3(conv2)
-
-        if self.downsample:
-            residual = self.conv_down(input)
-
-        if self.has_se:
-            conv3 = self.se(conv3)
-
-        y = paddle.add(x=residual, y=conv3)
-        y = F.relu(y)
-        return y
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride=1,
-                 has_se=False,
-                 downsample=False,
-                 name=None):
-        super(BasicBlock, self).__init__()
-
-        self.has_se = has_se
-        self.downsample = downsample
-
-        self.conv1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act="relu",
-            name=name + "_conv1")
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=1,
-            act=None,
-            name=name + "_conv2")
-
-        if self.downsample:
-            self.conv_down = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                act="relu",
-                name=name + "_downsample")
-
-        if self.has_se:
-            self.se = SELayer(
-                num_channels=num_filters,
-                num_filters=num_filters,
-                reduction_ratio=16,
-                name='fc' + name)
-
-    def forward(self, input):
-        residual = input
-        conv1 = self.conv1(input)
-        conv2 = self.conv2(conv1)
-
-        if self.downsample:
-            residual = self.conv_down(input)
-
-        if self.has_se:
-            conv2 = self.se(conv2)
-
-        y = paddle.add(x=residual, y=conv2)
-        y = F.relu(y)
-        return y
-
-
-class SELayer(nn.Layer):
-    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
-        super(SELayer, self).__init__()
-
-        self.pool2d_gap = AdaptiveAvgPool2D(1)
-
-        self._num_channels = num_channels
-
-        med_ch = int(num_channels / reduction_ratio)
-        stdv = 1.0 / math.sqrt(num_channels * 1.0)
-        self.squeeze = Linear(
-            num_channels,
-            med_ch,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
-            bias_attr=ParamAttr(name=name + '_sqz_offset'))
-
-        stdv = 1.0 / math.sqrt(med_ch * 1.0)
-        self.excitation = Linear(
-            med_ch,
-            num_filters,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
-            bias_attr=ParamAttr(name=name + '_exc_offset'))
-
-    def forward(self, input):
-        pool = self.pool2d_gap(input)
-        pool = paddle.squeeze(pool, axis=[2, 3])
-        squeeze = self.squeeze(pool)
-        squeeze = F.relu(squeeze)
-        excitation = self.excitation(squeeze)
-        excitation = F.sigmoid(excitation)
-        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
-        out = input * excitation
-        return out
-
-
-class Stage(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_modules,
-                 num_filters,
-                 has_se=False,
-                 multi_scale_output=True,
-                 name=None):
-        super(Stage, self).__init__()
-
-        self._num_modules = num_modules
-
-        self.stage_func_list = []
-        for i in range(num_modules):
-            if i == num_modules - 1 and not multi_scale_output:
-                stage_func = self.add_sublayer(
-                    "stage_{}_{}".format(name, i + 1),
-                    HighResolutionModule(
-                        num_channels=num_channels,
-                        num_filters=num_filters,
-                        has_se=has_se,
-                        multi_scale_output=False,
-                        name=name + '_' + str(i + 1)))
-            else:
-                stage_func = self.add_sublayer(
-                    "stage_{}_{}".format(name, i + 1),
-                    HighResolutionModule(
-                        num_channels=num_channels,
-                        num_filters=num_filters,
-                        has_se=has_se,
-                        name=name + '_' + str(i + 1)))
-
-            self.stage_func_list.append(stage_func)
-
-    def forward(self, input):
-        out = input
-        for idx in range(self._num_modules):
-            out = self.stage_func_list[idx](out)
-        return out
-
-
-class HighResolutionModule(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 has_se=False,
-                 multi_scale_output=True,
-                 name=None):
-        super(HighResolutionModule, self).__init__()
-
-        self.branches_func = Branches(
-            block_num=4,
-            in_channels=num_channels,
-            out_channels=num_filters,
-            has_se=has_se,
-            name=name)
-
-        self.fuse_func = FuseLayers(
-            in_channels=num_filters,
-            out_channels=num_filters,
-            multi_scale_output=multi_scale_output,
-            name=name)
-
-    def forward(self, input):
-        out = self.branches_func(input)
-        out = self.fuse_func(out)
-        return out
-
-
-class FuseLayers(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 multi_scale_output=True,
-                 name=None):
-        super(FuseLayers, self).__init__()
-
-        self._actual_ch = len(in_channels) if multi_scale_output else 1
-        self._in_channels = in_channels
-
-        self.residual_func_list = []
-        for i in range(self._actual_ch):
-            for j in range(len(in_channels)):
-                residual_func = None
-                if j > i:
-                    residual_func = self.add_sublayer(
-                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[j],
-                            num_filters=out_channels[i],
-                            filter_size=1,
-                            stride=1,
-                            act=None,
-                            name=name + '_layer_' + str(i + 1) + '_' +
-                            str(j + 1)))
-                    self.residual_func_list.append(residual_func)
-                elif j < i:
-                    pre_num_filters = in_channels[j]
-                    for k in range(i - j):
-                        if k == i - j - 1:
-                            residual_func = self.add_sublayer(
-                                "residual_{}_layer_{}_{}_{}".format(
-                                    name, i + 1, j + 1, k + 1),
-                                ConvBNLayer(
-                                    num_channels=pre_num_filters,
-                                    num_filters=out_channels[i],
-                                    filter_size=3,
-                                    stride=2,
-                                    act=None,
-                                    name=name + '_layer_' + str(i + 1) + '_' +
-                                    str(j + 1) + '_' + str(k + 1)))
-                            pre_num_filters = out_channels[i]
-                        else:
-                            residual_func = self.add_sublayer(
-                                "residual_{}_layer_{}_{}_{}".format(
-                                    name, i + 1, j + 1, k + 1),
-                                ConvBNLayer(
-                                    num_channels=pre_num_filters,
-                                    num_filters=out_channels[j],
-                                    filter_size=3,
-                                    stride=2,
-                                    act="relu",
-                                    name=name + '_layer_' + str(i + 1) + '_' +
-                                    str(j + 1) + '_' + str(k + 1)))
-                            pre_num_filters = out_channels[j]
-                        self.residual_func_list.append(residual_func)
-
-    def forward(self, input):
-        outs = []
-        residual_func_idx = 0
-        for i in range(self._actual_ch):
-            residual = input[i]
-            for j in range(len(self._in_channels)):
-                if j > i:
-                    y = self.residual_func_list[residual_func_idx](input[j])
-                    residual_func_idx += 1
-
-                    y = F.upsample(y, scale_factor=2**(j - i), mode="nearest")
-                    residual = paddle.add(x=residual, y=y)
-                elif j < i:
-                    y = input[j]
-                    for k in range(i - j):
-                        y = self.residual_func_list[residual_func_idx](y)
-                        residual_func_idx += 1
-
-                    residual = paddle.add(x=residual, y=y)
-
-            residual = F.relu(residual)
-            outs.append(residual)
-
-        return outs
-
-
-class LastClsOut(nn.Layer):
-    def __init__(self,
-                 num_channel_list,
-                 has_se,
-                 num_filters_list=[32, 64, 128, 256],
-                 name=None):
-        super(LastClsOut, self).__init__()
-
-        self.func_list = []
-        for idx in range(len(num_channel_list)):
-            func = self.add_sublayer(
-                "conv_{}_conv_{}".format(name, idx + 1),
-                BottleneckBlock(
-                    num_channels=num_channel_list[idx],
-                    num_filters=num_filters_list[idx],
-                    has_se=has_se,
-                    downsample=True,
-                    name=name + 'conv_' + str(idx + 1)))
-            self.func_list.append(func)
-
-    def forward(self, inputs):
-        outs = []
-        for idx, input in enumerate(inputs):
-            out = self.func_list[idx](input)
-            outs.append(out)
-        return outs
-
-
-class HRNet(nn.Layer):
-    def __init__(self, width=18, has_se=False, class_dim=1000):
-        super(HRNet, self).__init__()
-
-        self.width = width
-        self.has_se = has_se
-        self.channels = {
-            18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
-            30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
-            32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
-            40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
-            44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]],
-            48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
-            60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]],
-            64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
-        }
-        self._class_dim = class_dim
-
-        channels_2, channels_3, channels_4 = self.channels[width]
-        num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3
-
-        self.conv_layer1_1 = ConvBNLayer(
-            num_channels=3,
-            num_filters=64,
-            filter_size=3,
-            stride=2,
-            act='relu',
-            name="layer1_1")
-
-        self.conv_layer1_2 = ConvBNLayer(
-            num_channels=64,
-            num_filters=64,
-            filter_size=3,
-            stride=2,
-            act='relu',
-            name="layer1_2")
-
-        self.la1 = Layer1(num_channels=64, has_se=has_se, name="layer2")
-
-        self.tr1 = TransitionLayer(
-            in_channels=[256], out_channels=channels_2, name="tr1")
-
-        self.st2 = Stage(
-            num_channels=channels_2,
-            num_modules=num_modules_2,
-            num_filters=channels_2,
-            has_se=self.has_se,
-            name="st2")
-
-        self.tr2 = TransitionLayer(
-            in_channels=channels_2, out_channels=channels_3, name="tr2")
-        self.st3 = Stage(
-            num_channels=channels_3,
-            num_modules=num_modules_3,
-            num_filters=channels_3,
-            has_se=self.has_se,
-            name="st3")
-
-        self.tr3 = TransitionLayer(
-            in_channels=channels_3, out_channels=channels_4, name="tr3")
-        self.st4 = Stage(
-            num_channels=channels_4,
-            num_modules=num_modules_4,
-            num_filters=channels_4,
-            has_se=self.has_se,
-            name="st4")
-
-        # classification
-        num_filters_list = [32, 64, 128, 256]
-        self.last_cls = LastClsOut(
-            num_channel_list=channels_4,
-            has_se=self.has_se,
-            num_filters_list=num_filters_list,
-            name="cls_head", )
-
-        last_num_filters = [256, 512, 1024]
-        self.cls_head_conv_list = []
-        for idx in range(3):
-            self.cls_head_conv_list.append(
-                self.add_sublayer(
-                    "cls_head_add{}".format(idx + 1),
-                    ConvBNLayer(
-                        num_channels=num_filters_list[idx] * 4,
-                        num_filters=last_num_filters[idx],
-                        filter_size=3,
-                        stride=2,
-                        name="cls_head_add" + str(idx + 1))))
-
-        self.conv_last = ConvBNLayer(
-            num_channels=1024,
-            num_filters=2048,
-            filter_size=1,
-            stride=1,
-            name="cls_head_last_conv")
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1)
-
-        stdv = 1.0 / math.sqrt(2048 * 1.0)
-
-        self.out = Linear(
-            2048,
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name="fc_weights"),
-            bias_attr=ParamAttr(name="fc_offset"))
-
-    def forward(self, input):
-        conv1 = self.conv_layer1_1(input)
-        conv2 = self.conv_layer1_2(conv1)
-
-        la1 = self.la1(conv2)
-
-        tr1 = self.tr1([la1])
-        st2 = self.st2(tr1)
-
-        tr2 = self.tr2(st2)
-        st3 = self.st3(tr2)
-
-        tr3 = self.tr3(st3)
-        st4 = self.st4(tr3)
-
-        last_cls = self.last_cls(st4)
-
-        y = last_cls[0]
-        for idx in range(3):
-            y = paddle.add(last_cls[idx + 1], self.cls_head_conv_list[idx](y))
-
-        y = self.conv_last(y)
-        y = self.pool2d_avg(y)
-        y = paddle.reshape(y, shape=[-1, y.shape[1]])
-        y = self.out(y)
-        return y
-
-
-def HRNet_W18_C(**args):
-    model = HRNet(width=18, **args)
-    return model
-
-
-def HRNet_W30_C(**args):
-    model = HRNet(width=30, **args)
-    return model
-
-
-def HRNet_W32_C(**args):
-    model = HRNet(width=32, **args)
-    return model
-
-
-def HRNet_W40_C(**args):
-    model = HRNet(width=40, **args)
-    return model
-
-
-def HRNet_W44_C(**args):
-    model = HRNet(width=44, **args)
-    return model
-
-
-def HRNet_W48_C(**args):
-    model = HRNet(width=48, **args)
-    return model
-
-
-def HRNet_W64_C(**args):
-    model = HRNet(width=64, **args)
-    return model
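A hedged usage sketch of the deleted factories: `width` selects the per-stage channel lists from self.channels, e.g. 18 -> [[18, 36], [18, 36, 72], [18, 36, 72, 144]]:

    import paddle

    model = HRNet_W18_C(class_dim=1000)
    logits = model(paddle.randn([1, 3, 224, 224]))  # input size assumed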

+ 0 - 246
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/mobilenet_v1.py

@@ -1,246 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import KaimingNormal
-
-__all__ = ["MobileNetV1"]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 filter_size,
-                 num_filters,
-                 stride,
-                 padding,
-                 channels=None,
-                 num_groups=1,
-                 act='relu',
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            groups=num_groups,
-            weight_attr=ParamAttr(
-                initializer=KaimingNormal(), name=name + "_weights"),
-            bias_attr=False)
-
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(name + "_bn_scale"),
-            bias_attr=ParamAttr(name + "_bn_offset"),
-            moving_mean_name=name + "_bn_mean",
-            moving_variance_name=name + "_bn_variance")
-
-    def forward(self, inputs):
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        return y
-
-
-class DepthwiseSeparable(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters1,
-                 num_filters2,
-                 num_groups,
-                 stride,
-                 scale,
-                 name=None):
-        super(DepthwiseSeparable, self).__init__()
-
-        self._depthwise_conv = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=int(num_filters1 * scale),
-            filter_size=3,
-            stride=stride,
-            padding=1,
-            num_groups=int(num_groups * scale),
-            name=name + "_dw")
-
-        self._pointwise_conv = ConvBNLayer(
-            num_channels=int(num_filters1 * scale),
-            filter_size=1,
-            num_filters=int(num_filters2 * scale),
-            stride=1,
-            padding=0,
-            name=name + "_sep")
-
-    def forward(self, inputs):
-        y = self._depthwise_conv(inputs)
-        y = self._pointwise_conv(y)
-        return y
-
-
-class MobileNet(nn.Layer):
-    def __init__(self, scale=1.0, class_dim=1000):
-        super(MobileNet, self).__init__()
-        self.scale = scale
-        self.block_list = []
-
-        self.conv1 = ConvBNLayer(
-            num_channels=3,
-            filter_size=3,
-            channels=3,
-            num_filters=int(32 * scale),
-            stride=2,
-            padding=1,
-            name="conv1")
-
-        conv2_1 = self.add_sublayer(
-            "conv2_1",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(32 * scale),
-                num_filters1=32,
-                num_filters2=64,
-                num_groups=32,
-                stride=1,
-                scale=scale,
-                name="conv2_1"))
-        self.block_list.append(conv2_1)
-
-        conv2_2 = self.add_sublayer(
-            "conv2_2",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(64 * scale),
-                num_filters1=64,
-                num_filters2=128,
-                num_groups=64,
-                stride=2,
-                scale=scale,
-                name="conv2_2"))
-        self.block_list.append(conv2_2)
-
-        conv3_1 = self.add_sublayer(
-            "conv3_1",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(128 * scale),
-                num_filters1=128,
-                num_filters2=128,
-                num_groups=128,
-                stride=1,
-                scale=scale,
-                name="conv3_1"))
-        self.block_list.append(conv3_1)
-
-        conv3_2 = self.add_sublayer(
-            "conv3_2",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(128 * scale),
-                num_filters1=128,
-                num_filters2=256,
-                num_groups=128,
-                stride=2,
-                scale=scale,
-                name="conv3_2"))
-        self.block_list.append(conv3_2)
-
-        conv4_1 = self.add_sublayer(
-            "conv4_1",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(256 * scale),
-                num_filters1=256,
-                num_filters2=256,
-                num_groups=256,
-                stride=1,
-                scale=scale,
-                name="conv4_1"))
-        self.block_list.append(conv4_1)
-
-        conv4_2 = self.add_sublayer(
-            "conv4_2",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(256 * scale),
-                num_filters1=256,
-                num_filters2=512,
-                num_groups=256,
-                stride=2,
-                scale=scale,
-                name="conv4_2"))
-        self.block_list.append(conv4_2)
-
-        for i in range(5):
-            conv5 = self.add_sublayer(
-                "conv5_" + str(i + 1),
-                sublayer=DepthwiseSeparable(
-                    num_channels=int(512 * scale),
-                    num_filters1=512,
-                    num_filters2=512,
-                    num_groups=512,
-                    stride=1,
-                    scale=scale,
-                    name="conv5_" + str(i + 1)))
-            self.block_list.append(conv5)
-
-        conv5_6 = self.add_sublayer(
-            "conv5_6",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(512 * scale),
-                num_filters1=512,
-                num_filters2=1024,
-                num_groups=512,
-                stride=2,
-                scale=scale,
-                name="conv5_6"))
-        self.block_list.append(conv5_6)
-
-        conv6 = self.add_sublayer(
-            "conv6",
-            sublayer=DepthwiseSeparable(
-                num_channels=int(1024 * scale),
-                num_filters1=1024,
-                num_filters2=1024,
-                num_groups=1024,
-                stride=1,
-                scale=scale,
-                name="conv6"))
-        self.block_list.append(conv6)
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1)
-
-        self.out = Linear(
-            int(1024 * scale),
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=KaimingNormal(), name="fc7_weights"),
-            bias_attr=ParamAttr(name="fc7_offset"))
-
-    def forward(self, inputs):
-        y = self.conv1(inputs)
-        for block in self.block_list:
-            y = block(y)
-        y = self.pool2d_avg(y)
-        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
-        y = self.out(y)
-        return y
-
-
-def MobileNetV1(scale=1.0, **args):
-    model = MobileNet(scale=scale, **args)
-    return model
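A hedged usage sketch of the deleted factory: `scale` multiplies every channel count, so the classifier consumes int(1024 * scale) features:

    import paddle

    model = MobileNetV1(scale=0.5, class_dim=1000)  # final Linear in_features = 512
    logits = model(paddle.randn([1, 3, 224, 224]))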

+ 0 - 217
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/mobilenet_v2.py

@@ -1,217 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-
-__all__ = ["MobileNetV2"]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 filter_size,
-                 num_filters,
-                 stride,
-                 padding,
-                 channels=None,
-                 num_groups=1,
-                 name=None,
-                 use_cudnn=True):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            groups=num_groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-
-        self._batch_norm = BatchNorm(
-            num_filters,
-            param_attr=ParamAttr(name=name + "_bn_scale"),
-            bias_attr=ParamAttr(name=name + "_bn_offset"),
-            moving_mean_name=name + "_bn_mean",
-            moving_variance_name=name + "_bn_variance")
-
-    def forward(self, inputs, if_act=True):
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        if if_act:
-            y = F.relu6(y)
-        return y
-
-
-class InvertedResidualUnit(nn.Layer):
-    def __init__(self, num_channels, num_in_filter, num_filters, stride,
-                 filter_size, padding, expansion_factor, name):
-        super(InvertedResidualUnit, self).__init__()
-        num_expfilter = int(round(num_in_filter * expansion_factor))
-        self._expand_conv = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_expfilter,
-            filter_size=1,
-            stride=1,
-            padding=0,
-            num_groups=1,
-            name=name + "_expand")
-
-        self._bottleneck_conv = ConvBNLayer(
-            num_channels=num_expfilter,
-            num_filters=num_expfilter,
-            filter_size=filter_size,
-            stride=stride,
-            padding=padding,
-            num_groups=num_expfilter,
-            use_cudnn=False,
-            name=name + "_dwise")
-
-        self._linear_conv = ConvBNLayer(
-            num_channels=num_expfilter,
-            num_filters=num_filters,
-            filter_size=1,
-            stride=1,
-            padding=0,
-            num_groups=1,
-            name=name + "_linear")
-
-    def forward(self, inputs, ifshortcut):
-        y = self._expand_conv(inputs, if_act=True)
-        y = self._bottleneck_conv(y, if_act=True)
-        y = self._linear_conv(y, if_act=False)
-        if ifshortcut:
-            y = paddle.add(inputs, y)
-        return y
-
-
-class InvresiBlocks(nn.Layer):
-    def __init__(self, in_c, t, c, n, s, name):
-        super(InvresiBlocks, self).__init__()
-
-        self._first_block = InvertedResidualUnit(
-            num_channels=in_c,
-            num_in_filter=in_c,
-            num_filters=c,
-            stride=s,
-            filter_size=3,
-            padding=1,
-            expansion_factor=t,
-            name=name + "_1")
-
-        self._block_list = []
-        for i in range(1, n):
-            block = self.add_sublayer(
-                name + "_" + str(i + 1),
-                sublayer=InvertedResidualUnit(
-                    num_channels=c,
-                    num_in_filter=c,
-                    num_filters=c,
-                    stride=1,
-                    filter_size=3,
-                    padding=1,
-                    expansion_factor=t,
-                    name=name + "_" + str(i + 1)))
-            self._block_list.append(block)
-
-    def forward(self, inputs):
-        y = self._first_block(inputs, ifshortcut=False)
-        for block in self._block_list:
-            y = block(y, ifshortcut=True)
-        return y
-
-
-class MobileNet(nn.Layer):
-    def __init__(self, class_dim=1000, scale=1.0):
-        super(MobileNet, self).__init__()
-        self.scale = scale
-        self.class_dim = class_dim
-
-        bottleneck_params_list = [
-            (1, 16, 1, 1),
-            (6, 24, 2, 2),
-            (6, 32, 3, 2),
-            (6, 64, 4, 2),
-            (6, 96, 3, 1),
-            (6, 160, 3, 2),
-            (6, 320, 1, 1),
-        ]
-
-        self.conv1 = ConvBNLayer(
-            num_channels=3,
-            num_filters=int(32 * scale),
-            filter_size=3,
-            stride=2,
-            padding=1,
-            name="conv1_1")
-
-        self.block_list = []
-        i = 1
-        in_c = int(32 * scale)
-        for layer_setting in bottleneck_params_list:
-            t, c, n, s = layer_setting
-            i += 1
-            block = self.add_sublayer(
-                "conv" + str(i),
-                sublayer=InvresiBlocks(
-                    in_c=in_c,
-                    t=t,
-                    c=int(c * scale),
-                    n=n,
-                    s=s,
-                    name="conv" + str(i)))
-            self.block_list.append(block)
-            in_c = int(c * scale)
-
-        self.out_c = int(1280 * scale) if scale > 1.0 else 1280
-        self.conv9 = ConvBNLayer(
-            num_channels=in_c,
-            num_filters=self.out_c,
-            filter_size=1,
-            stride=1,
-            padding=0,
-            name="conv9")
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1)
-
-        self.out = Linear(
-            self.out_c,
-            class_dim,
-            weight_attr=ParamAttr(name="fc10_weights"),
-            bias_attr=ParamAttr(name="fc10_offset"))
-
-    def forward(self, inputs):
-        y = self.conv1(inputs, if_act=True)
-        for block in self.block_list:
-            y = block(y)
-        y = self.conv9(y, if_act=True)
-        y = self.pool2d_avg(y)
-        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
-        y = self.out(y)
-        return y
-
-
-def MobileNetV2(scale=1.0, **args):
-    model = MobileNet(scale=scale, **args)
-    return model
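
The stages above are configured by the (t, c, n, s) tuples in bottleneck_params_list: expansion factor, output channels, repeat count, and first-repeat stride. A plain-Python sketch of the expansion arithmetic mirrored from InvertedResidualUnit:

def expanded_channels(num_in_filter, expansion_factor):
    # num_expfilter = int(round(num_in_filter * expansion_factor))
    return int(round(num_in_filter * expansion_factor))

# the (6, 24, 2, 2) stage at scale=1.0 receives 16 channels, so its
# depthwise convolutions run on:
print(expanded_channels(16, 6))  # 96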

+ 0 - 310
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/mobilenet_v3.py

@@ -1,310 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn.functional import hardswish, hardsigmoid
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.regularizer import L2Decay
-
-__all__ = ["MobileNetV3_small", "MobileNetV3_large"]
-
-
-def make_divisible(v, divisor=8, min_value=None):
-    if min_value is None:
-        min_value = divisor
-    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
-    if new_v < 0.9 * v:
-        new_v += divisor
-    return new_v
-
-
-class MobileNetV3(nn.Layer):
-    def __init__(self,
-                 scale=1.0,
-                 model_name="small",
-                 dropout_prob=0.2,
-                 class_dim=1000):
-        super(MobileNetV3, self).__init__()
-
-        inplanes = 16
-        if model_name == "large":
-            self.cfg = [
-                # k, exp, c,  se,     nl,  s,
-                [3, 16, 16, False, "relu", 1],
-                [3, 64, 24, False, "relu", 2],
-                [3, 72, 24, False, "relu", 1],
-                [5, 72, 40, True, "relu", 2],
-                [5, 120, 40, True, "relu", 1],
-                [5, 120, 40, True, "relu", 1],
-                [3, 240, 80, False, "hardswish", 2],
-                [3, 200, 80, False, "hardswish", 1],
-                [3, 184, 80, False, "hardswish", 1],
-                [3, 184, 80, False, "hardswish", 1],
-                [3, 480, 112, True, "hardswish", 1],
-                [3, 672, 112, True, "hardswish", 1],
-                [5, 672, 160, True, "hardswish", 2],
-                [5, 960, 160, True, "hardswish", 1],
-                [5, 960, 160, True, "hardswish", 1],
-            ]
-            self.cls_ch_squeeze = 960
-            self.cls_ch_expand = 1280
-        elif model_name == "small":
-            self.cfg = [
-                # k, exp, c,  se,     nl,  s,
-                [3, 16, 16, True, "relu", 2],
-                [3, 72, 24, False, "relu", 2],
-                [3, 88, 24, False, "relu", 1],
-                [5, 96, 40, True, "hardswish", 2],
-                [5, 240, 40, True, "hardswish", 1],
-                [5, 240, 40, True, "hardswish", 1],
-                [5, 120, 48, True, "hardswish", 1],
-                [5, 144, 48, True, "hardswish", 1],
-                [5, 288, 96, True, "hardswish", 2],
-                [5, 576, 96, True, "hardswish", 1],
-                [5, 576, 96, True, "hardswish", 1],
-            ]
-            self.cls_ch_squeeze = 576
-            self.cls_ch_expand = 1280
-        else:
-            raise NotImplementedError(
-                "model_name [{}] is not implemented!".format(model_name))
-
-        self.conv1 = ConvBNLayer(
-            in_c=3,
-            out_c=make_divisible(inplanes * scale),
-            filter_size=3,
-            stride=2,
-            padding=1,
-            num_groups=1,
-            if_act=True,
-            act="hardswish",
-            name="conv1")
-
-        self.block_list = []
-        i = 0
-        inplanes = make_divisible(inplanes * scale)
-        for (k, exp, c, se, nl, s) in self.cfg:
-            block = self.add_sublayer(
-                "conv" + str(i + 2),
-                ResidualUnit(
-                    in_c=inplanes,
-                    mid_c=make_divisible(scale * exp),
-                    out_c=make_divisible(scale * c),
-                    filter_size=k,
-                    stride=s,
-                    use_se=se,
-                    act=nl,
-                    name="conv" + str(i + 2)))
-            self.block_list.append(block)
-            inplanes = make_divisible(scale * c)
-            i += 1
-
-        self.last_second_conv = ConvBNLayer(
-            in_c=inplanes,
-            out_c=make_divisible(scale * self.cls_ch_squeeze),
-            filter_size=1,
-            stride=1,
-            padding=0,
-            num_groups=1,
-            if_act=True,
-            act="hardswish",
-            name="conv_last")
-
-        self.pool = AdaptiveAvgPool2D(1)
-
-        self.last_conv = Conv2D(
-            in_channels=make_divisible(scale * self.cls_ch_squeeze),
-            out_channels=self.cls_ch_expand,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            weight_attr=ParamAttr(name="last_1x1_conv_weights"),
-            bias_attr=False)
-
-        self.dropout = Dropout(p=dropout_prob, mode="downscale_in_infer")
-
-        self.out = Linear(
-            self.cls_ch_expand,
-            class_dim,
-            weight_attr=ParamAttr("fc_weights"),
-            bias_attr=ParamAttr(name="fc_offset"))
-
-    def forward(self, inputs, label=None):
-        x = self.conv1(inputs)
-
-        for block in self.block_list:
-            x = block(x)
-
-        x = self.last_second_conv(x)
-        x = self.pool(x)
-
-        x = self.last_conv(x)
-        x = hardswish(x)
-        x = self.dropout(x)
-        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
-        x = self.out(x)
-
-        return x
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 in_c,
-                 out_c,
-                 filter_size,
-                 stride,
-                 padding,
-                 num_groups=1,
-                 if_act=True,
-                 act=None,
-                 use_cudnn=True,
-                 name=""):
-        super(ConvBNLayer, self).__init__()
-        self.if_act = if_act
-        self.act = act
-        self.conv = Conv2D(
-            in_channels=in_c,
-            out_channels=out_c,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            groups=num_groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-        self.bn = BatchNorm(
-            num_channels=out_c,
-            act=None,
-            param_attr=ParamAttr(
-                name=name + "_bn_scale", regularizer=L2Decay(0.0)),
-            bias_attr=ParamAttr(
-                name=name + "_bn_offset", regularizer=L2Decay(0.0)),
-            moving_mean_name=name + "_bn_mean",
-            moving_variance_name=name + "_bn_variance")
-
-    def forward(self, x):
-        x = self.conv(x)
-        x = self.bn(x)
-        if self.if_act:
-            if self.act == "relu":
-                x = F.relu(x)
-            elif self.act == "hardswish":
-                x = hardswish(x)
-            else:
-                print("The activation function is selected incorrectly.")
-                exit()
-        return x
-
-
-class ResidualUnit(nn.Layer):
-    def __init__(self,
-                 in_c,
-                 mid_c,
-                 out_c,
-                 filter_size,
-                 stride,
-                 use_se,
-                 act=None,
-                 name=''):
-        super(ResidualUnit, self).__init__()
-        self.if_shortcut = stride == 1 and in_c == out_c
-        self.if_se = use_se
-
-        self.expand_conv = ConvBNLayer(
-            in_c=in_c,
-            out_c=mid_c,
-            filter_size=1,
-            stride=1,
-            padding=0,
-            if_act=True,
-            act=act,
-            name=name + "_expand")
-        self.bottleneck_conv = ConvBNLayer(
-            in_c=mid_c,
-            out_c=mid_c,
-            filter_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            num_groups=mid_c,
-            if_act=True,
-            act=act,
-            name=name + "_depthwise")
-        if self.if_se:
-            self.mid_se = SEModule(mid_c, name=name + "_se")
-        self.linear_conv = ConvBNLayer(
-            in_c=mid_c,
-            out_c=out_c,
-            filter_size=1,
-            stride=1,
-            padding=0,
-            if_act=False,
-            act=None,
-            name=name + "_linear")
-
-    def forward(self, inputs):
-        x = self.expand_conv(inputs)
-        x = self.bottleneck_conv(x)
-        if self.if_se:
-            x = self.mid_se(x)
-        x = self.linear_conv(x)
-        if self.if_shortcut:
-            x = paddle.add(inputs, x)
-        return x
-
-
-class SEModule(nn.Layer):
-    def __init__(self, channel, reduction=4, name=""):
-        super(SEModule, self).__init__()
-        self.avg_pool = AdaptiveAvgPool2D(1)
-        self.conv1 = Conv2D(
-            in_channels=channel,
-            out_channels=channel // reduction,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            weight_attr=ParamAttr(name=name + "_1_weights"),
-            bias_attr=ParamAttr(name=name + "_1_offset"))
-        self.conv2 = Conv2D(
-            in_channels=channel // reduction,
-            out_channels=channel,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            weight_attr=ParamAttr(name + "_2_weights"),
-            bias_attr=ParamAttr(name=name + "_2_offset"))
-
-    def forward(self, inputs):
-        outputs = self.avg_pool(inputs)
-        outputs = self.conv1(outputs)
-        outputs = F.relu(outputs)
-        outputs = self.conv2(outputs)
-        outputs = hardsigmoid(outputs, slope=0.2, offset=0.5)
-        return paddle.multiply(x=inputs, y=outputs)
-
-
-def MobileNetV3_small(scale=1.0, **args):
-    model = MobileNetV3(model_name="small", scale=scale, **args)
-    return model
-
-
-def MobileNetV3_large(scale=1.0, **args):
-    model = MobileNetV3(model_name="large", scale=scale, **args)
-    return model
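
make_divisible above keeps every scaled width a multiple of 8 while never shrinking a value by more than 10%. A self-contained check, copying the helper verbatim:

def make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

print(make_divisible(96 * 0.35))  # 32: 33.6 rounds to the nearest multiple of 8
print(make_divisible(10))         # 16: rounding down to 8 would lose >10%, so it bumps up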

+ 0 - 312
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/resnet.py

@@ -1,312 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, BatchNorm, Linear
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D
-from paddle.nn.initializer import Uniform
-
-import math
-
-__all__ = ["ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 groups=1,
-                 act=None,
-                 name=None,
-                 data_format="NCHW"):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False,
-            data_format=data_format)
-        if name == "conv1":
-            bn_name = "bn_" + name
-        else:
-            bn_name = "bn" + name[3:]
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(name=bn_name + "_scale"),
-            bias_attr=ParamAttr(name=bn_name + "_offset"),
-            moving_mean_name=bn_name + "_mean",
-            moving_variance_name=bn_name + "_variance",
-            data_layout=data_format)
-
-    def forward(self, inputs):
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        return y
-
-
-class BottleneckBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride,
-                 shortcut=True,
-                 name=None,
-                 data_format="NCHW"):
-        super(BottleneckBlock, self).__init__()
-
-        self.conv0 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=1,
-            act="relu",
-            name=name + "_branch2a",
-            data_format=data_format)
-        self.conv1 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act="relu",
-            name=name + "_branch2b",
-            data_format=data_format)
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters * 4,
-            filter_size=1,
-            act=None,
-            name=name + "_branch2c",
-            data_format=data_format)
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                stride=stride,
-                name=name + "_branch1",
-                data_format=data_format)
-
-        self.shortcut = shortcut
-
-        self._num_channels_out = num_filters * 4
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-        conv2 = self.conv2(conv1)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-
-        y = paddle.add(x=short, y=conv2)
-        y = F.relu(y)
-        return y
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride,
-                 shortcut=True,
-                 name=None,
-                 data_format="NCHW"):
-        super(BasicBlock, self).__init__()
-        self.stride = stride
-        self.conv0 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act="relu",
-            name=name + "_branch2a",
-            data_format=data_format)
-        self.conv1 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            act=None,
-            name=name + "_branch2b",
-            data_format=data_format)
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters,
-                filter_size=1,
-                stride=stride,
-                name=name + "_branch1",
-                data_format=data_format)
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-        y = paddle.add(x=short, y=conv1)
-        y = F.relu(y)
-        return y
-
-
-class ResNet(nn.Layer):
-    def __init__(self,
-                 layers=50,
-                 class_dim=1000,
-                 input_image_channel=3,
-                 data_format="NCHW"):
-        super(ResNet, self).__init__()
-
-        self.layers = layers
-        self.data_format = data_format
-        self.input_image_channel = input_image_channel
-
-        supported_layers = [18, 34, 50, 101, 152]
-        assert layers in supported_layers, \
-            "supported layers are {} but input layer is {}".format(
-                supported_layers, layers)
-
-        if layers == 18:
-            depth = [2, 2, 2, 2]
-        elif layers == 34 or layers == 50:
-            depth = [3, 4, 6, 3]
-        elif layers == 101:
-            depth = [3, 4, 23, 3]
-        elif layers == 152:
-            depth = [3, 8, 36, 3]
-        num_channels = [64, 256, 512,
-                        1024] if layers >= 50 else [64, 64, 128, 256]
-        num_filters = [64, 128, 256, 512]
-
-        self.conv = ConvBNLayer(
-            num_channels=self.input_image_channel,
-            num_filters=64,
-            filter_size=7,
-            stride=2,
-            act="relu",
-            name="conv1",
-            data_format=self.data_format)
-        self.pool2d_max = MaxPool2D(
-            kernel_size=3, stride=2, padding=1, data_format=self.data_format)
-
-        self.block_list = []
-        if layers >= 50:
-            for block in range(len(depth)):
-                shortcut = False
-                for i in range(depth[block]):
-                    if layers in [101, 152] and block == 2:
-                        if i == 0:
-                            conv_name = "res" + str(block + 2) + "a"
-                        else:
-                            conv_name = "res" + str(block + 2) + "b" + str(i)
-                    else:
-                        conv_name = "res" + str(block + 2) + chr(97 + i)
-                    bottleneck_block = self.add_sublayer(
-                        conv_name,
-                        BottleneckBlock(
-                            num_channels=num_channels[block]
-                            if i == 0 else num_filters[block] * 4,
-                            num_filters=num_filters[block],
-                            stride=2 if i == 0 and block != 0 else 1,
-                            shortcut=shortcut,
-                            name=conv_name,
-                            data_format=self.data_format))
-                    self.block_list.append(bottleneck_block)
-                    shortcut = True
-        else:
-            for block in range(len(depth)):
-                shortcut = False
-                for i in range(depth[block]):
-                    conv_name = "res" + str(block + 2) + chr(97 + i)
-                    basic_block = self.add_sublayer(
-                        conv_name,
-                        BasicBlock(
-                            num_channels=num_channels[block]
-                            if i == 0 else num_filters[block],
-                            num_filters=num_filters[block],
-                            stride=2 if i == 0 and block != 0 else 1,
-                            shortcut=shortcut,
-                            name=conv_name,
-                            data_format=self.data_format))
-                    self.block_list.append(basic_block)
-                    shortcut = True
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1, data_format=self.data_format)
-
-        self.pool2d_avg_channels = num_channels[-1] * 2
-
-        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
-
-        self.out = Linear(
-            self.pool2d_avg_channels,
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
-            bias_attr=ParamAttr(name="fc_0.b_0"))
-
-    def forward(self, inputs):
-        y = self.conv(inputs)
-        y = self.pool2d_max(y)
-        for block in self.block_list:
-            y = block(y)
-        y = self.pool2d_avg(y)
-        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
-        y = self.out(y)
-        return y
-
-
-def ResNet18(**args):
-    model = ResNet(layers=18, **args)
-    return model
-
-
-def ResNet34(**args):
-    model = ResNet(layers=34, **args)
-    return model
-
-
-def ResNet50(**args):
-    model = ResNet(layers=50, **args)
-    return model
-
-
-def ResNet101(**args):
-    model = ResNet(layers=101, **args)
-    return model
-
-
-def ResNet152(**args):
-    model = ResNet(layers=152, **args)
-    return model
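
The conv_name bookkeeping above reproduces the Caffe-style ResNet parameter naming; in isolation:

# stages are numbered from "res2"; blocks within a stage are lettered a, b, c, ...
for block in range(2):
    for i in range(3):
        print("res" + str(block + 2) + chr(97 + i))
# -> res2a res2b res2c res3a res3b res3c
# for 101/152-layer nets, the third stage switches to res4a, res4b1, res4b2, ...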

+ 0 - 364
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/resnet_vd.py

@@ -1,364 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, BatchNorm, Linear
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import Uniform
-
-import math
-
-__all__ = [
-    "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet50_vd_ssld",
-    "ResNet101_vd", "ResNet101_vd_ssld", "ResNet152_vd", "ResNet200_vd"
-]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 groups=1,
-                 is_vd_mode=False,
-                 act=None,
-                 lr_mult=1.0,
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-        self.is_vd_mode = is_vd_mode
-        self._pool2d_avg = AvgPool2D(
-            kernel_size=2, stride=2, padding=0, ceil_mode=True)
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            groups=groups,
-            weight_attr=ParamAttr(
-                name=name + "_weights", learning_rate=lr_mult),
-            bias_attr=False)
-        if name == "conv1":
-            bn_name = "bn_" + name
-        else:
-            bn_name = "bn" + name[3:]
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(
-                name=bn_name + '_scale', learning_rate=lr_mult),
-            bias_attr=ParamAttr(
-                name=bn_name + '_offset', learning_rate=lr_mult),
-            moving_mean_name=bn_name + '_mean',
-            moving_variance_name=bn_name + '_variance')
-
-    def forward(self, inputs):
-        if self.is_vd_mode:
-            inputs = self._pool2d_avg(inputs)
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        return y
-
-
-class BottleneckBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride,
-                 shortcut=True,
-                 if_first=False,
-                 lr_mult=1.0,
-                 name=None):
-        super(BottleneckBlock, self).__init__()
-
-        self.conv0 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=1,
-            act='relu',
-            lr_mult=lr_mult,
-            name=name + "_branch2a")
-        self.conv1 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act='relu',
-            lr_mult=lr_mult,
-            name=name + "_branch2b")
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters * 4,
-            filter_size=1,
-            act=None,
-            lr_mult=lr_mult,
-            name=name + "_branch2c")
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                stride=1,
-                is_vd_mode=not if_first,
-                lr_mult=lr_mult,
-                name=name + "_branch1")
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-        conv2 = self.conv2(conv1)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-        y = paddle.add(x=short, y=conv2)
-        y = F.relu(y)
-        return y
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 stride,
-                 shortcut=True,
-                 if_first=False,
-                 lr_mult=1.0,
-                 name=None):
-        super(BasicBlock, self).__init__()
-        self.stride = stride
-        self.conv0 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=stride,
-            act='relu',
-            lr_mult=lr_mult,
-            name=name + "_branch2a")
-        self.conv1 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            act=None,
-            lr_mult=lr_mult,
-            name=name + "_branch2b")
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters,
-                filter_size=1,
-                stride=1,
-                is_vd_mode=not if_first,
-                lr_mult=lr_mult,
-                name=name + "_branch1")
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-        y = paddle.add(x=short, y=conv1)
-        y = F.relu(y)
-        return y
-
-
-class ResNet_vd(nn.Layer):
-    def __init__(self,
-                 layers=50,
-                 class_dim=1000,
-                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]):
-        super(ResNet_vd, self).__init__()
-
-        self.layers = layers
-        supported_layers = [18, 34, 50, 101, 152, 200]
-        assert layers in supported_layers, \
-            "supported layers are {} but input layer is {}".format(
-                supported_layers, layers)
-
-        self.lr_mult_list = lr_mult_list
-        assert isinstance(self.lr_mult_list, (
-            list, tuple
-        )), "lr_mult_list should be in (list, tuple) but got {}".format(
-            type(self.lr_mult_list))
-        assert len(
-            self.lr_mult_list
-        ) == 5, "lr_mult_list length should should be 5 but got {}".format(
-            len(self.lr_mult_list))
-
-        if layers == 18:
-            depth = [2, 2, 2, 2]
-        elif layers == 34 or layers == 50:
-            depth = [3, 4, 6, 3]
-        elif layers == 101:
-            depth = [3, 4, 23, 3]
-        elif layers == 152:
-            depth = [3, 8, 36, 3]
-        elif layers == 200:
-            depth = [3, 12, 48, 3]
-        num_channels = [64, 256, 512,
-                        1024] if layers >= 50 else [64, 64, 128, 256]
-        num_filters = [64, 128, 256, 512]
-
-        self.conv1_1 = ConvBNLayer(
-            num_channels=3,
-            num_filters=32,
-            filter_size=3,
-            stride=2,
-            act='relu',
-            lr_mult=self.lr_mult_list[0],
-            name="conv1_1")
-        self.conv1_2 = ConvBNLayer(
-            num_channels=32,
-            num_filters=32,
-            filter_size=3,
-            stride=1,
-            act='relu',
-            lr_mult=self.lr_mult_list[0],
-            name="conv1_2")
-        self.conv1_3 = ConvBNLayer(
-            num_channels=32,
-            num_filters=64,
-            filter_size=3,
-            stride=1,
-            act='relu',
-            lr_mult=self.lr_mult_list[0],
-            name="conv1_3")
-        self.pool2d_max = MaxPool2D(kernel_size=3, stride=2, padding=1)
-
-        self.block_list = []
-        if layers >= 50:
-            for block in range(len(depth)):
-                shortcut = False
-                for i in range(depth[block]):
-                    if layers in [101, 152, 200] and block == 2:
-                        if i == 0:
-                            conv_name = "res" + str(block + 2) + "a"
-                        else:
-                            conv_name = "res" + str(block + 2) + "b" + str(i)
-                    else:
-                        conv_name = "res" + str(block + 2) + chr(97 + i)
-                    bottleneck_block = self.add_sublayer(
-                        'bb_%d_%d' % (block, i),
-                        BottleneckBlock(
-                            num_channels=num_channels[block]
-                            if i == 0 else num_filters[block] * 4,
-                            num_filters=num_filters[block],
-                            stride=2 if i == 0 and block != 0 else 1,
-                            shortcut=shortcut,
-                            if_first=block == i == 0,
-                            lr_mult=self.lr_mult_list[block + 1],
-                            name=conv_name))
-                    self.block_list.append(bottleneck_block)
-                    shortcut = True
-        else:
-            for block in range(len(depth)):
-                shortcut = False
-                for i in range(depth[block]):
-                    conv_name = "res" + str(block + 2) + chr(97 + i)
-                    basic_block = self.add_sublayer(
-                        'bb_%d_%d' % (block, i),
-                        BasicBlock(
-                            num_channels=num_channels[block]
-                            if i == 0 else num_filters[block],
-                            num_filters=num_filters[block],
-                            stride=2 if i == 0 and block != 0 else 1,
-                            shortcut=shortcut,
-                            if_first=block == i == 0,
-                            name=conv_name,
-                            lr_mult=self.lr_mult_list[block + 1]))
-                    self.block_list.append(basic_block)
-                    shortcut = True
-
-        self.pool2d_avg = AdaptiveAvgPool2D(1)
-
-        self.pool2d_avg_channels = num_channels[-1] * 2
-
-        stdv = 1.0 / math.sqrt(self.pool2d_avg_channels * 1.0)
-
-        self.out = Linear(
-            self.pool2d_avg_channels,
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name="fc_0.w_0"),
-            bias_attr=ParamAttr(name="fc_0.b_0"))
-
-    def forward(self, inputs):
-        y = self.conv1_1(inputs)
-        y = self.conv1_2(y)
-        y = self.conv1_3(y)
-        y = self.pool2d_max(y)
-        for block in self.block_list:
-            y = block(y)
-        y = self.pool2d_avg(y)
-        y = paddle.reshape(y, shape=[-1, self.pool2d_avg_channels])
-        y = self.out(y)
-        return y
-
-
-def ResNet18_vd(**args):
-    model = ResNet_vd(layers=18, **args)
-    return model
-
-
-def ResNet34_vd(**args):
-    model = ResNet_vd(layers=34, **args)
-    return model
-
-
-def ResNet50_vd(**args):
-    model = ResNet_vd(layers=50, **args)
-    return model
-
-
-def ResNet101_vd(**args):
-    model = ResNet_vd(layers=101, **args)
-    return model
-
-
-def ResNet152_vd(**args):
-    model = ResNet_vd(layers=152, **args)
-    return model
-
-
-def ResNet200_vd(**args):
-    model = ResNet_vd(layers=200, **args)
-    return model
-
-
-def ResNet50_vd_ssld(**args):
-    model = ResNet_vd(layers=50, lr_mult_list=[.1, .1, .2, .2, .3], **args)
-    return model
-
-
-def ResNet101_vd_ssld(**args):
-    model = ResNet_vd(layers=101, lr_mult_list=[.1, .1, .2, .2, .3], **args)
-    return model
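
The _ssld factories above are plain ResNet_vd builds with preset per-stage learning-rate multipliers (one for the stem plus one per stage, hence the length-5 list). An equivalent explicit call, with an illustrative import path since the module is deleted here:

from resnet_vd import ResNet_vd  # hypothetical import path

model = ResNet_vd(layers=50, class_dim=1000,
                  lr_mult_list=[0.1, 0.1, 0.2, 0.2, 0.3])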

+ 0 - 291
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/shufflenet_v2.py

@@ -1,291 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import paddle
-from paddle import ParamAttr, reshape, transpose, concat, split
-from paddle.nn import Layer, Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm, Linear
-from paddle.nn.initializer import KaimingNormal
-from paddle.nn.functional import swish
-
-__all__ = ["ShuffleNetV2", "ShuffleNetV2_swish"]
-
-
-def channel_shuffle(x, groups):
-    batch_size, num_channels, height, width = x.shape[0:4]
-    channels_per_group = num_channels // groups
-
-    # reshape
-    x = reshape(
-        x=x, shape=[batch_size, groups, channels_per_group, height, width])
-
-    # transpose
-    x = transpose(x=x, perm=[0, 2, 1, 3, 4])
-
-    # flatten
-    x = reshape(x=x, shape=[batch_size, num_channels, height, width])
-    return x
-
-
-class ConvBNLayer(Layer):
-    def __init__(
-            self,
-            in_channels,
-            out_channels,
-            kernel_size,
-            stride,
-            padding,
-            groups=1,
-            act=None,
-            name=None, ):
-        super(ConvBNLayer, self).__init__()
-        self._conv = Conv2D(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            weight_attr=ParamAttr(
-                initializer=KaimingNormal(), name=name + "_weights"),
-            bias_attr=False)
-
-        self._batch_norm = BatchNorm(
-            out_channels,
-            param_attr=ParamAttr(name=name + "_bn_scale"),
-            bias_attr=ParamAttr(name=name + "_bn_offset"),
-            act=act,
-            moving_mean_name=name + "_bn_mean",
-            moving_variance_name=name + "_bn_variance")
-
-    def forward(self, inputs):
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        return y
-
-
-class InvertedResidual(Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 stride,
-                 act="relu",
-                 name=None):
-        super(InvertedResidual, self).__init__()
-        self._conv_pw = ConvBNLayer(
-            in_channels=in_channels // 2,
-            out_channels=out_channels // 2,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            groups=1,
-            act=act,
-            name='stage_' + name + '_conv1')
-        self._conv_dw = ConvBNLayer(
-            in_channels=out_channels // 2,
-            out_channels=out_channels // 2,
-            kernel_size=3,
-            stride=stride,
-            padding=1,
-            groups=out_channels // 2,
-            act=None,
-            name='stage_' + name + '_conv2')
-        self._conv_linear = ConvBNLayer(
-            in_channels=out_channels // 2,
-            out_channels=out_channels // 2,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            groups=1,
-            act=act,
-            name='stage_' + name + '_conv3')
-
-    def forward(self, inputs):
-        x1, x2 = split(
-            inputs,
-            num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
-            axis=1)
-        x2 = self._conv_pw(x2)
-        x2 = self._conv_dw(x2)
-        x2 = self._conv_linear(x2)
-        out = concat([x1, x2], axis=1)
-        return channel_shuffle(out, 2)
-
-
-class InvertedResidualDS(Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 stride,
-                 act="relu",
-                 name=None):
-        super(InvertedResidualDS, self).__init__()
-
-        # branch1
-        self._conv_dw_1 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=in_channels,
-            kernel_size=3,
-            stride=stride,
-            padding=1,
-            groups=in_channels,
-            act=None,
-            name='stage_' + name + '_conv4')
-        self._conv_linear_1 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=out_channels // 2,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            groups=1,
-            act=act,
-            name='stage_' + name + '_conv5')
-        # branch2
-        self._conv_pw_2 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=out_channels // 2,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            groups=1,
-            act=act,
-            name='stage_' + name + '_conv1')
-        self._conv_dw_2 = ConvBNLayer(
-            in_channels=out_channels // 2,
-            out_channels=out_channels // 2,
-            kernel_size=3,
-            stride=stride,
-            padding=1,
-            groups=out_channels // 2,
-            act=None,
-            name='stage_' + name + '_conv2')
-        self._conv_linear_2 = ConvBNLayer(
-            in_channels=out_channels // 2,
-            out_channels=out_channels // 2,
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            groups=1,
-            act=act,
-            name='stage_' + name + '_conv3')
-
-    def forward(self, inputs):
-        x1 = self._conv_dw_1(inputs)
-        x1 = self._conv_linear_1(x1)
-        x2 = self._conv_pw_2(inputs)
-        x2 = self._conv_dw_2(x2)
-        x2 = self._conv_linear_2(x2)
-        out = concat([x1, x2], axis=1)
-
-        return channel_shuffle(out, 2)
-
-
-class ShuffleNet(Layer):
-    def __init__(self, class_dim=1000, scale=1.0, act="relu"):
-        super(ShuffleNet, self).__init__()
-        self.scale = scale
-        self.class_dim = class_dim
-        stage_repeats = [4, 8, 4]
-
-        if scale == 0.25:
-            stage_out_channels = [-1, 24, 24, 48, 96, 512]
-        elif scale == 0.33:
-            stage_out_channels = [-1, 24, 32, 64, 128, 512]
-        elif scale == 0.5:
-            stage_out_channels = [-1, 24, 48, 96, 192, 1024]
-        elif scale == 1.0:
-            stage_out_channels = [-1, 24, 116, 232, 464, 1024]
-        elif scale == 1.5:
-            stage_out_channels = [-1, 24, 176, 352, 704, 1024]
-        elif scale == 2.0:
-            stage_out_channels = [-1, 24, 224, 488, 976, 2048]
-        else:
-            raise NotImplementedError("This scale size:[" + str(scale) +
-                                      "] is not implemented!")
-        # 1. conv1
-        self._conv1 = ConvBNLayer(
-            in_channels=3,
-            out_channels=stage_out_channels[1],
-            kernel_size=3,
-            stride=2,
-            padding=1,
-            act=act,
-            name='stage1_conv')
-        self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
-
-        # 2. bottleneck sequences
-        self._block_list = []
-        for stage_id, num_repeat in enumerate(stage_repeats):
-            for i in range(num_repeat):
-                if i == 0:
-                    block = self.add_sublayer(
-                        name=str(stage_id + 2) + '_' + str(i + 1),
-                        sublayer=InvertedResidualDS(
-                            in_channels=stage_out_channels[stage_id + 1],
-                            out_channels=stage_out_channels[stage_id + 2],
-                            stride=2,
-                            act=act,
-                            name=str(stage_id + 2) + '_' + str(i + 1)))
-                else:
-                    block = self.add_sublayer(
-                        name=str(stage_id + 2) + '_' + str(i + 1),
-                        sublayer=InvertedResidual(
-                            in_channels=stage_out_channels[stage_id + 2],
-                            out_channels=stage_out_channels[stage_id + 2],
-                            stride=1,
-                            act=act,
-                            name=str(stage_id + 2) + '_' + str(i + 1)))
-                self._block_list.append(block)
-        # 3. last_conv
-        self._last_conv = ConvBNLayer(
-            in_channels=stage_out_channels[-2],
-            out_channels=stage_out_channels[-1],
-            kernel_size=1,
-            stride=1,
-            padding=0,
-            act=act,
-            name='conv5')
-        # 4. pool
-        self._pool2d_avg = AdaptiveAvgPool2D(1)
-        self._out_c = stage_out_channels[-1]
-        # 5. fc
-        self._fc = Linear(
-            stage_out_channels[-1],
-            class_dim,
-            weight_attr=ParamAttr(name='fc6_weights'),
-            bias_attr=ParamAttr(name='fc6_offset'))
-
-    def forward(self, inputs):
-        y = self._conv1(inputs)
-        y = self._max_pool(y)
-        for inv in self._block_list:
-            y = inv(y)
-        y = self._last_conv(y)
-        y = self._pool2d_avg(y)
-        y = paddle.flatten(y, start_axis=1, stop_axis=-1)
-        y = self._fc(y)
-        return y
-
-
-def ShuffleNetV2(scale=1.0, **args):
-    model = ShuffleNet(scale=scale, **args)
-    return model
-
-
-def ShuffleNetV2_swish(**args):
-    model = ShuffleNet(scale=1.0, act="swish", **args)
-    return model
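
What channel_shuffle above does, traced on a tiny tensor (assumes paddle is installed; the reshape/transpose/reshape sequence is the one from the function):

import paddle

x = paddle.arange(8, dtype="float32").reshape([1, 8, 1, 1])  # channels 0..7
n, c, h, w = x.shape
groups = 2
y = paddle.reshape(x, [n, groups, c // groups, h, w])  # split channels into groups
y = paddle.transpose(y, perm=[0, 2, 1, 3, 4])          # move the group axis inward
y = paddle.reshape(y, [n, c, h, w])                    # flatten back
print(y.flatten().tolist())  # [0.0, 4.0, 1.0, 5.0, 2.0, 6.0, 3.0, 7.0]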

+ 0 - 359
dygraph/paddlex/cv/nets/ppcls/modeling/architectures/xception.py

@@ -1,359 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
-from paddle.nn.initializer import Uniform
-import math
-
-__all__ = ['Xception41', 'Xception65', 'Xception71']
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 groups=1,
-                 act=None,
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False)
-        bn_name = "bn_" + name
-        self._batch_norm = BatchNorm(
-            num_filters,
-            act=act,
-            param_attr=ParamAttr(name=bn_name + "_scale"),
-            bias_attr=ParamAttr(name=bn_name + "_offset"),
-            moving_mean_name=bn_name + '_mean',
-            moving_variance_name=bn_name + '_variance')
-
-    def forward(self, inputs):
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        return y
-
-
-class SeparableConv(nn.Layer):
-    def __init__(self, input_channels, output_channels, stride=1, name=None):
-        super(SeparableConv, self).__init__()
-
-        self._pointwise_conv = ConvBNLayer(
-            input_channels, output_channels, 1, name=name + "_sep")
-        self._depthwise_conv = ConvBNLayer(
-            output_channels,
-            output_channels,
-            3,
-            stride=stride,
-            groups=output_channels,
-            name=name + "_dw")
-
-    def forward(self, inputs):
-        x = self._pointwise_conv(inputs)
-        x = self._depthwise_conv(x)
-        return x
-
-
-class EntryFlowBottleneckBlock(nn.Layer):
-    def __init__(self,
-                 input_channels,
-                 output_channels,
-                 stride=2,
-                 name=None,
-                 relu_first=False):
-        super(EntryFlowBottleneckBlock, self).__init__()
-        self.relu_first = relu_first
-
-        self._short = Conv2D(
-            in_channels=input_channels,
-            out_channels=output_channels,
-            kernel_size=1,
-            stride=stride,
-            padding=0,
-            weight_attr=ParamAttr(name + "_branch1_weights"),
-            bias_attr=False)
-        self._conv1 = SeparableConv(
-            input_channels,
-            output_channels,
-            stride=1,
-            name=name + "_branch2a_weights")
-        self._conv2 = SeparableConv(
-            output_channels,
-            output_channels,
-            stride=1,
-            name=name + "_branch2b_weights")
-        self._pool = MaxPool2D(kernel_size=3, stride=stride, padding=1)
-
-    def forward(self, inputs):
-        conv0 = inputs
-        short = self._short(inputs)
-        if self.relu_first:
-            conv0 = F.relu(conv0)
-        conv1 = self._conv1(conv0)
-        conv2 = F.relu(conv1)
-        conv2 = self._conv2(conv2)
-        pool = self._pool(conv2)
-        return paddle.add(x=short, y=pool)
-
-
-class EntryFlow(nn.Layer):
-    def __init__(self, block_num=3):
-        super(EntryFlow, self).__init__()
-
-        name = "entry_flow"
-        self.block_num = block_num
-        self._conv1 = ConvBNLayer(
-            3, 32, 3, stride=2, act="relu", name=name + "_conv1")
-        self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2")
-        if block_num == 3:
-            self._conv_0 = EntryFlowBottleneckBlock(
-                64, 128, stride=2, name=name + "_0", relu_first=False)
-            self._conv_1 = EntryFlowBottleneckBlock(
-                128, 256, stride=2, name=name + "_1", relu_first=True)
-            self._conv_2 = EntryFlowBottleneckBlock(
-                256, 728, stride=2, name=name + "_2", relu_first=True)
-        elif block_num == 5:
-            self._conv_0 = EntryFlowBottleneckBlock(
-                64, 128, stride=2, name=name + "_0", relu_first=False)
-            self._conv_1 = EntryFlowBottleneckBlock(
-                128, 256, stride=1, name=name + "_1", relu_first=True)
-            self._conv_2 = EntryFlowBottleneckBlock(
-                256, 256, stride=2, name=name + "_2", relu_first=True)
-            self._conv_3 = EntryFlowBottleneckBlock(
-                256, 728, stride=1, name=name + "_3", relu_first=True)
-            self._conv_4 = EntryFlowBottleneckBlock(
-                728, 728, stride=2, name=name + "_4", relu_first=True)
-        else:
-            raise NotImplementedError(
-                "block_num [{}] is not supported; use 3 or 5.".format(block_num))
-
-    def forward(self, inputs):
-        x = self._conv1(inputs)
-        x = self._conv2(x)
-
-        if self.block_num == 3:
-            x = self._conv_0(x)
-            x = self._conv_1(x)
-            x = self._conv_2(x)
-        elif self.block_num == 5:
-            x = self._conv_0(x)
-            x = self._conv_1(x)
-            x = self._conv_2(x)
-            x = self._conv_3(x)
-            x = self._conv_4(x)
-        return x
-
-
-class MiddleFlowBottleneckBlock(nn.Layer):
-    def __init__(self, input_channels, output_channels, name):
-        super(MiddleFlowBottleneckBlock, self).__init__()
-
-        self._conv_0 = SeparableConv(
-            input_channels,
-            output_channels,
-            stride=1,
-            name=name + "_branch2a_weights")
-        self._conv_1 = SeparableConv(
-            output_channels,
-            output_channels,
-            stride=1,
-            name=name + "_branch2b_weights")
-        self._conv_2 = SeparableConv(
-            output_channels,
-            output_channels,
-            stride=1,
-            name=name + "_branch2c_weights")
-
-    def forward(self, inputs):
-        conv0 = F.relu(inputs)
-        conv0 = self._conv_0(conv0)
-        conv1 = F.relu(conv0)
-        conv1 = self._conv_1(conv1)
-        conv2 = F.relu(conv1)
-        conv2 = self._conv_2(conv2)
-        return paddle.add(x=inputs, y=conv2)
-
-
-class MiddleFlow(nn.Layer):
-    def __init__(self, block_num=8):
-        super(MiddleFlow, self).__init__()
-
-        self.block_num = block_num
-        self._conv_0 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_0")
-        self._conv_1 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_1")
-        self._conv_2 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_2")
-        self._conv_3 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_3")
-        self._conv_4 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_4")
-        self._conv_5 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_5")
-        self._conv_6 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_6")
-        self._conv_7 = MiddleFlowBottleneckBlock(
-            728, 728, name="middle_flow_7")
-        if block_num == 16:
-            self._conv_8 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_8")
-            self._conv_9 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_9")
-            self._conv_10 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_10")
-            self._conv_11 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_11")
-            self._conv_12 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_12")
-            self._conv_13 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_13")
-            self._conv_14 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_14")
-            self._conv_15 = MiddleFlowBottleneckBlock(
-                728, 728, name="middle_flow_15")
-
-    def forward(self, inputs):
-        x = self._conv_0(inputs)
-        x = self._conv_1(x)
-        x = self._conv_2(x)
-        x = self._conv_3(x)
-        x = self._conv_4(x)
-        x = self._conv_5(x)
-        x = self._conv_6(x)
-        x = self._conv_7(x)
-        if self.block_num == 16:
-            x = self._conv_8(x)
-            x = self._conv_9(x)
-            x = self._conv_10(x)
-            x = self._conv_11(x)
-            x = self._conv_12(x)
-            x = self._conv_13(x)
-            x = self._conv_14(x)
-            x = self._conv_15(x)
-        return x
-
-
-class ExitFlowBottleneckBlock(nn.Layer):
-    def __init__(self, input_channels, output_channels1, output_channels2,
-                 name):
-        super(ExitFlowBottleneckBlock, self).__init__()
-
-        self._short = Conv2D(
-            in_channels=input_channels,
-            out_channels=output_channels2,
-            kernel_size=1,
-            stride=2,
-            padding=0,
-            weight_attr=ParamAttr(name + "_branch1_weights"),
-            bias_attr=False)
-        self._conv_1 = SeparableConv(
-            input_channels,
-            output_channels1,
-            stride=1,
-            name=name + "_branch2a_weights")
-        self._conv_2 = SeparableConv(
-            output_channels1,
-            output_channels2,
-            stride=1,
-            name=name + "_branch2b_weights")
-        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
-
-    def forward(self, inputs):
-        short = self._short(inputs)
-        conv0 = F.relu(inputs)
-        conv1 = self._conv_1(conv0)
-        conv2 = F.relu(conv1)
-        conv2 = self._conv_2(conv2)
-        pool = self._pool(conv2)
-        return paddle.add(x=short, y=pool)
-
-
-class ExitFlow(nn.Layer):
-    def __init__(self, class_dim):
-        super(ExitFlow, self).__init__()
-
-        name = "exit_flow"
-
-        self._conv_0 = ExitFlowBottleneckBlock(
-            728, 728, 1024, name=name + "_1")
-        self._conv_1 = SeparableConv(1024, 1536, stride=1, name=name + "_2")
-        self._conv_2 = SeparableConv(1536, 2048, stride=1, name=name + "_3")
-        self._pool = AdaptiveAvgPool2D(1)
-        stdv = 1.0 / math.sqrt(2048 * 1.0)
-        self._out = Linear(
-            2048,
-            class_dim,
-            weight_attr=ParamAttr(
-                name="fc_weights", initializer=Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="fc_offset"))
-
-    def forward(self, inputs):
-        conv0 = self._conv_0(inputs)
-        conv1 = self._conv_1(conv0)
-        conv1 = F.relu(conv1)
-        conv2 = self._conv_2(conv1)
-        conv2 = F.relu(conv2)
-        pool = self._pool(conv2)
-        pool = paddle.flatten(pool, start_axis=1, stop_axis=-1)
-        out = self._out(pool)
-        return out
-
-
-class Xception(nn.Layer):
-    def __init__(self,
-                 entry_flow_block_num=3,
-                 middle_flow_block_num=8,
-                 class_dim=1000):
-        super(Xception, self).__init__()
-        self.entry_flow_block_num = entry_flow_block_num
-        self.middle_flow_block_num = middle_flow_block_num
-        self._entry_flow = EntryFlow(entry_flow_block_num)
-        self._middle_flow = MiddleFlow(middle_flow_block_num)
-        self._exit_flow = ExitFlow(class_dim)
-
-    def forward(self, inputs):
-        x = self._entry_flow(inputs)
-        x = self._middle_flow(x)
-        x = self._exit_flow(x)
-        return x
-
-
-def Xception41(**args):
-    model = Xception(entry_flow_block_num=3, middle_flow_block_num=8, **args)
-    return model
-
-
-def Xception65(**args):
-    model = Xception(entry_flow_block_num=3, middle_flow_block_num=16, **args)
-    return model
-
-
-def Xception71(**args):
-    model = Xception(entry_flow_block_num=5, middle_flow_block_num=16, **args)
-    return model
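
For reference, the three factory functions above differ only in depth: Xception41 and Xception65 use a 3-block entry flow with an 8- or 16-block middle flow, while Xception71 deepens the entry flow to 5 blocks. Since the backbone ends in AdaptiveAvgPool2D(1), it is fully convolutional ahead of the classifier head. A minimal usage sketch of the removed module, assuming PaddlePaddle 2.x; the import path `xception` is hypothetical, standing in for wherever this file sat on the PYTHONPATH:

import paddle
from xception import Xception41  # hypothetical module path for this file

# entry_flow_block_num=3, middle_flow_block_num=8
model = Xception41(class_dim=1000)
model.eval()

# AdaptiveAvgPool2D(1) makes the head input-size agnostic;
# 299x299 NCHW is the conventional Xception input (an assumption here,
# since the entry-flow stem is not shown in full above).
x = paddle.randn([1, 3, 299, 299])
with paddle.no_grad():
    logits = model(x)
print(logits.shape)  # [1, 1000]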

+ 0 - 99
dygraph/paddlex/cv/nets/ppcls/modeling/loss.py

@@ -1,99 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-import paddle.nn.functional as F
-
-__all__ = ['CELoss', 'JSDivLoss']
-
-
-class Loss(object):
-    """
-    Loss
-    """
-
-    def __init__(self, class_dim=1000, epsilon=None):
-        assert class_dim > 1, "class_dim=%d is not larger than 1" % (class_dim)
-        self._class_dim = class_dim
-        if epsilon is not None and epsilon >= 0.0 and epsilon <= 1.0:
-            self._epsilon = epsilon
-            self._label_smoothing = True
-        else:
-            self._epsilon = None
-            self._label_smoothing = False
-
-    def _labelsmoothing(self, target):
-        if target.shape[-1] != self._class_dim:
-            one_hot_target = F.one_hot(target, self._class_dim)
-        else:
-            one_hot_target = target
-        soft_target = F.label_smooth(one_hot_target, epsilon=self._epsilon)
-        soft_target = paddle.reshape(soft_target, shape=[-1, self._class_dim])
-        return soft_target
-
-    def _crossentropy(self, input, target, use_pure_fp16=False):
-        if self._label_smoothing:
-            target = self._labelsmoothing(target)
-            input = -F.log_softmax(input, axis=-1)
-            cost = paddle.sum(target * input, axis=-1)
-        else:
-            cost = F.cross_entropy(input=input, label=target)
-        if use_pure_fp16:
-            avg_cost = paddle.sum(cost)
-        else:
-            avg_cost = paddle.mean(cost)
-        return avg_cost
-
-    def _kldiv(self, input, target, name=None):
-        eps = 1.0e-10
-        cost = target * paddle.log(
-            (target + eps) / (input + eps)) * self._class_dim
-        return cost
-
-    def _jsdiv(self, input, target):
-        input = F.softmax(input)
-        target = F.softmax(target)
-        cost = self._kldiv(input, target) + self._kldiv(target, input)
-        cost = cost / 2
-        avg_cost = paddle.mean(cost)
-        return avg_cost
-
-    def __call__(self, input, target):
-        pass
-
-
-class CELoss(Loss):
-    """
-    Cross entropy loss
-    """
-
-    def __init__(self, class_dim=1000, epsilon=None):
-        super(CELoss, self).__init__(class_dim, epsilon)
-
-    def __call__(self, input, target, use_pure_fp16=False):
-        cost = self._crossentropy(input, target, use_pure_fp16)
-        return cost
-
-
-class JSDivLoss(Loss):
-    """
-    JSDiv loss
-    """
-
-    def __init__(self, class_dim=1000, epsilon=None):
-        super(JSDivLoss, self).__init__(class_dim, epsilon)
-
-    def __call__(self, input, target):
-        cost = self._jsdiv(input, target)
-        return cost
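
CELoss above computes plain cross entropy unless epsilon lies in [0, 1], in which case it switches to a soft-target form: integer labels are one-hot encoded, smoothed with F.label_smooth, and the loss becomes the mean of -sum(target * log_softmax(input)) over the batch. JSDivLoss symmetrizes _kldiv over softmax-normalized inputs. A minimal sketch of driving the removed loss, assuming a hypothetical import path `loss` for this file:

import paddle
from loss import CELoss  # hypothetical module path for this file

# epsilon in [0, 1] enables label smoothing (soft-target cross entropy)
loss_fn = CELoss(class_dim=10, epsilon=0.1)

logits = paddle.randn([4, 10])             # batch of 4, 10 classes
labels = paddle.randint(0, 10, shape=[4])  # integer class ids

# Labels are one-hot encoded and smoothed internally before the
# -sum(target * log_softmax(logits)) reduction.
cost = loss_fn(logits, labels)
print(float(cost))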