# centernet_fpn.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import KaimingUniform

from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.modeling.layers import ConvNormLayer

from ..shape_spec import ShapeSpec
  24. def fill_up_weights(up):
  25. weight = up.weight
  26. f = math.ceil(weight.shape[2] / 2)
  27. c = (2 * f - 1 - f % 2) / (2. * f)
  28. for i in range(weight.shape[2]):
  29. for j in range(weight.shape[3]):
  30. weight[0, 0, i, j] = \
  31. (1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
  32. for c in range(1, weight.shape[0]):
  33. weight[c, 0, :, :] = weight[0, 0, :, :]
  34. class IDAUp(nn.Layer):
  35. def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):
  36. super(IDAUp, self).__init__()
  37. for i in range(1, len(ch_ins)):
  38. ch_in = ch_ins[i]
  39. up_s = int(up_strides[i])
  40. proj = nn.Sequential(
  41. ConvNormLayer(
  42. ch_in,
  43. ch_out,
  44. filter_size=3,
  45. stride=1,
  46. use_dcn=dcn_v2,
  47. bias_on=dcn_v2,
  48. norm_decay=None,
  49. dcn_lr_scale=1.,
  50. dcn_regularizer=None),
  51. nn.ReLU())
  52. node = nn.Sequential(
  53. ConvNormLayer(
  54. ch_out,
  55. ch_out,
  56. filter_size=3,
  57. stride=1,
  58. use_dcn=dcn_v2,
  59. bias_on=dcn_v2,
  60. norm_decay=None,
  61. dcn_lr_scale=1.,
  62. dcn_regularizer=None),
  63. nn.ReLU())
  64. param_attr = paddle.ParamAttr(initializer=KaimingUniform())
  65. up = nn.Conv2DTranspose(
  66. ch_out,
  67. ch_out,
  68. kernel_size=up_s * 2,
  69. weight_attr=param_attr,
  70. stride=up_s,
  71. padding=up_s // 2,
  72. groups=ch_out,
  73. bias_attr=False)
  74. # TODO: uncomment fill_up_weights
  75. #fill_up_weights(up)
  76. setattr(self, 'proj_' + str(i), proj)
  77. setattr(self, 'up_' + str(i), up)
  78. setattr(self, 'node_' + str(i), node)
  79. def forward(self, inputs, start_level, end_level):
  80. for i in range(start_level + 1, end_level):
  81. upsample = getattr(self, 'up_' + str(i - start_level))
  82. project = getattr(self, 'proj_' + str(i - start_level))
  83. inputs[i] = project(inputs[i])
  84. inputs[i] = upsample(inputs[i])
  85. node = getattr(self, 'node_' + str(i - start_level))
  86. inputs[i] = node(paddle.add(inputs[i], inputs[i - 1]))
  87. class DLAUp(nn.Layer):
  88. def __init__(self, start_level, channels, scales, ch_in=None, dcn_v2=True):
  89. super(DLAUp, self).__init__()
  90. self.start_level = start_level
  91. if ch_in is None:
  92. ch_in = channels
  93. self.channels = channels
  94. channels = list(channels)
  95. scales = np.array(scales, dtype=int)
  96. for i in range(len(channels) - 1):
  97. j = -i - 2
  98. setattr(
  99. self,
  100. 'ida_{}'.format(i),
  101. IDAUp(
  102. ch_in[j:],
  103. channels[j],
  104. scales[j:] // scales[j],
  105. dcn_v2=dcn_v2))
  106. scales[j + 1:] = scales[j]
  107. ch_in[j + 1:] = [channels[j] for _ in channels[j + 1:]]
  108. def forward(self, inputs):
  109. out = [inputs[-1]] # start with 32
  110. for i in range(len(inputs) - self.start_level - 1):
  111. ida = getattr(self, 'ida_{}'.format(i))
  112. ida(inputs, len(inputs) - i - 2, len(inputs))
  113. out.insert(0, inputs[-1])
  114. return out
  115. @register
  116. @serializable
  117. class CenterNetDLAFPN(nn.Layer):
  118. """
  119. Args:
  120. in_channels (list): number of input feature channels from backbone.
  121. [16, 32, 64, 128, 256, 512] by default, means the channels of DLA-34
  122. down_ratio (int): the down ratio from images to heatmap, 4 by default
  123. last_level (int): the last level of input feature fed into the upsamplng block
  124. out_channel (int): the channel of the output feature, 0 by default means
  125. the channel of the input feature whose down ratio is `down_ratio`
  126. dcn_v2 (bool): whether use the DCNv2, true by default
  127. """
  128. def __init__(self,
  129. in_channels,
  130. down_ratio=4,
  131. last_level=5,
  132. out_channel=0,
  133. dcn_v2=True):
  134. super(CenterNetDLAFPN, self).__init__()
  135. self.first_level = int(np.log2(down_ratio))
  136. self.down_ratio = down_ratio
  137. self.last_level = last_level
  138. scales = [2**i for i in range(len(in_channels[self.first_level:]))]
  139. self.dla_up = DLAUp(
  140. self.first_level,
  141. in_channels[self.first_level:],
  142. scales,
  143. dcn_v2=dcn_v2)
  144. self.out_channel = out_channel
  145. if out_channel == 0:
  146. self.out_channel = in_channels[self.first_level]
  147. self.ida_up = IDAUp(
  148. in_channels[self.first_level:self.last_level],
  149. self.out_channel,
  150. [2**i for i in range(self.last_level - self.first_level)],
  151. dcn_v2=dcn_v2)
  152. @classmethod
  153. def from_config(cls, cfg, input_shape):
  154. return {'in_channels': [i.channels for i in input_shape]}
  155. def forward(self, body_feats):
  156. dla_up_feats = self.dla_up(body_feats)
  157. ida_up_feats = []
  158. for i in range(self.last_level - self.first_level):
  159. ida_up_feats.append(dla_up_feats[i].clone())
  160. self.ida_up(ida_up_feats, 0, len(ida_up_feats))
  161. return ida_up_feats[-1]
  162. @property
  163. def out_shape(self):
  164. return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]