fpn.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict
import copy

from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import Xavier
from paddle.fluid.regularizer import L2Decay

__all__ = ['FPN', 'HRFPN']

def ConvNorm(input,
             num_filters,
             filter_size,
             stride=1,
             groups=1,
             norm_decay=0.,
             norm_type='affine_channel',
             norm_groups=32,
             dilation=1,
             lr_scale=1,
             freeze_norm=False,
             act=None,
             norm_name=None,
             initializer=None,
             name=None):
    """Conv2d followed by a normalization layer ('bn'/'sync_bn'/'gn'/'affine_channel')."""
    fan = num_filters
    conv = fluid.layers.conv2d(
        input=input,
        num_filters=num_filters,
        filter_size=filter_size,
        stride=stride,
        padding=((filter_size - 1) // 2) * dilation,
        dilation=dilation,
        groups=groups,
        act=None,
        param_attr=ParamAttr(
            name=name + "_weights",
            initializer=initializer,
            learning_rate=lr_scale),
        bias_attr=False,
        name=name + '.conv2d.output.1')

    # Frozen norm layers keep their parameters fixed by zeroing the learning rate.
    norm_lr = 0. if freeze_norm else 1.
    pattr = ParamAttr(
        name=norm_name + '_scale',
        learning_rate=norm_lr * lr_scale,
        regularizer=L2Decay(norm_decay))
    battr = ParamAttr(
        name=norm_name + '_offset',
        learning_rate=norm_lr * lr_scale,
        regularizer=L2Decay(norm_decay))

    if norm_type in ['bn', 'sync_bn']:
        global_stats = True if freeze_norm else False
        out = fluid.layers.batch_norm(
            input=conv,
            act=act,
            name=norm_name + '.output.1',
            param_attr=pattr,
            bias_attr=battr,
            moving_mean_name=norm_name + '_mean',
            moving_variance_name=norm_name + '_variance',
            use_global_stats=global_stats)
        scale = fluid.framework._get_var(pattr.name)
        bias = fluid.framework._get_var(battr.name)
    elif norm_type == 'gn':
        out = fluid.layers.group_norm(
            input=conv,
            act=act,
            name=norm_name + '.output.1',
            groups=norm_groups,
            param_attr=pattr,
            bias_attr=battr)
        scale = fluid.framework._get_var(pattr.name)
        bias = fluid.framework._get_var(battr.name)
    elif norm_type == 'affine_channel':
        scale = fluid.layers.create_parameter(
            shape=[conv.shape[1]],
            dtype=conv.dtype,
            attr=pattr,
            default_initializer=fluid.initializer.Constant(1.))
        bias = fluid.layers.create_parameter(
            shape=[conv.shape[1]],
            dtype=conv.dtype,
            attr=battr,
            default_initializer=fluid.initializer.Constant(0.))
        out = fluid.layers.affine_channel(
            x=conv, scale=scale, bias=bias, act=act)
    if freeze_norm:
        scale.stop_gradient = True
        bias.stop_gradient = True
    return out
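
# Usage sketch (illustrative, not part of the original file): ConvNorm stacks a
# conv2d and the chosen normalization layer. The input variable and the
# 'example_conv' names below are assumptions made for this example only.
#
#   feat = fluid.data(name='feat', shape=[None, 256, 64, 64], dtype='float32')
#   out = ConvNorm(
#       feat,
#       num_filters=256,
#       filter_size=3,
#       norm_type='gn',
#       initializer=Xavier(fan_out=256 * 3 * 3),
#       name='example_conv',
#       norm_name='example_conv_gn')
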
class FPN(object):
    """
    Feature Pyramid Network, see https://arxiv.org/abs/1612.03144

    Args:
        num_chan (int): number of feature channels
        min_level (int): lowest level of the backbone feature map to use
        max_level (int): highest level of the backbone feature map to use
        spatial_scale (list): feature map scaling factor
        has_extra_convs (bool): whether to add extra convolutions for higher levels
        norm_type (str|None): normalization type, 'bn'/'sync_bn'/'affine_channel'
        freeze_norm (bool): whether to freeze the normalization parameters
    """
    def __init__(self,
                 num_chan=256,
                 min_level=2,
                 max_level=6,
                 spatial_scale=[1. / 32., 1. / 16., 1. / 8., 1. / 4.],
                 has_extra_convs=False,
                 norm_type=None,
                 freeze_norm=False):
        self.freeze_norm = freeze_norm
        self.num_chan = num_chan
        self.min_level = min_level
        self.max_level = max_level
        self.spatial_scale = spatial_scale
        self.has_extra_convs = has_extra_convs
        self.norm_type = norm_type
    def _add_topdown_lateral(self, body_name, body_input, upper_output):
        lateral_name = 'fpn_inner_' + body_name + '_lateral'
        topdown_name = 'fpn_topdown_' + body_name
        fan = body_input.shape[1]
        # 1x1 lateral conv projects the backbone feature to num_chan channels.
        if self.norm_type:
            initializer = Xavier(fan_out=fan)
            lateral = ConvNorm(
                body_input,
                self.num_chan,
                1,
                initializer=initializer,
                norm_type=self.norm_type,
                freeze_norm=self.freeze_norm,
                name=lateral_name,
                norm_name=lateral_name)
        else:
            lateral = fluid.layers.conv2d(
                body_input,
                self.num_chan,
                1,
                param_attr=ParamAttr(
                    name=lateral_name + "_w", initializer=Xavier(fan_out=fan)),
                bias_attr=ParamAttr(
                    name=lateral_name + "_b",
                    learning_rate=2.,
                    regularizer=L2Decay(0.)),
                name=lateral_name)
        # Upsample the coarser level by 2x and fuse it with the lateral branch.
        topdown = fluid.layers.resize_nearest(
            upper_output, scale=2., name=topdown_name)

        return lateral + topdown
    def get_output(self, body_dict):
        """
        Add FPN onto backbone.

        Args:
            body_dict(OrderedDict): Dictionary of variables; each element is an
                output feature map of the backbone.

        Return:
            fpn_dict(OrderedDict): A dictionary mapping the name of each FPN
                level to its output.
            spatial_scale(list): A list of multiplicative spatial scale factors.
        """
        spatial_scale = copy.deepcopy(self.spatial_scale)
        # Traverse backbone outputs from the coarsest (topmost) level downwards.
        body_name_list = list(body_dict.keys())[::-1]
        num_backbone_stages = len(body_name_list)
        self.fpn_inner_output = [[] for _ in range(num_backbone_stages)]
        fpn_inner_name = 'fpn_inner_' + body_name_list[0]
        body_input = body_dict[body_name_list[0]]
        fan = body_input.shape[1]
        # 1x1 conv on the topmost backbone feature starts the top-down pathway.
        if self.norm_type:
            initializer = Xavier(fan_out=fan)
            self.fpn_inner_output[0] = ConvNorm(
                body_input,
                self.num_chan,
                1,
                initializer=initializer,
                norm_type=self.norm_type,
                freeze_norm=self.freeze_norm,
                name=fpn_inner_name,
                norm_name=fpn_inner_name)
        else:
            self.fpn_inner_output[0] = fluid.layers.conv2d(
                body_input,
                self.num_chan,
                1,
                param_attr=ParamAttr(
                    name=fpn_inner_name + "_w",
                    initializer=Xavier(fan_out=fan)),
                bias_attr=ParamAttr(
                    name=fpn_inner_name + "_b",
                    learning_rate=2.,
                    regularizer=L2Decay(0.)),
                name=fpn_inner_name)
        # Top-down pathway: fuse each level with the upsampled level above it.
        for i in range(1, num_backbone_stages):
            body_name = body_name_list[i]
            body_input = body_dict[body_name]
            top_output = self.fpn_inner_output[i - 1]
            fpn_inner_single = self._add_topdown_lateral(body_name, body_input,
                                                         top_output)
            self.fpn_inner_output[i] = fpn_inner_single
        # 3x3 convs produce the final FPN output at each level.
        fpn_dict = {}
        fpn_name_list = []
        for i in range(num_backbone_stages):
            fpn_name = 'fpn_' + body_name_list[i]
            fan = self.fpn_inner_output[i].shape[1] * 3 * 3
            if self.norm_type:
                initializer = Xavier(fan_out=fan)
                fpn_output = ConvNorm(
                    self.fpn_inner_output[i],
                    self.num_chan,
                    3,
                    initializer=initializer,
                    norm_type=self.norm_type,
                    freeze_norm=self.freeze_norm,
                    name=fpn_name,
                    norm_name=fpn_name)
            else:
                fpn_output = fluid.layers.conv2d(
                    self.fpn_inner_output[i],
                    self.num_chan,
                    filter_size=3,
                    padding=1,
                    param_attr=ParamAttr(
                        name=fpn_name + "_w", initializer=Xavier(fan_out=fan)),
                    bias_attr=ParamAttr(
                        name=fpn_name + "_b",
                        learning_rate=2.,
                        regularizer=L2Decay(0.)),
                    name=fpn_name)
            fpn_dict[fpn_name] = fpn_output
            fpn_name_list.append(fpn_name)
        # Extra coarsest level built by max-pooling the top FPN output
        # (e.g. the P6 level in Faster R-CNN with FPN).
        if not self.has_extra_convs and self.max_level - self.min_level == len(
                spatial_scale):
            body_top_name = fpn_name_list[0]
            body_top_extension = fluid.layers.pool2d(
                fpn_dict[body_top_name],
                1,
                'max',
                pool_stride=2,
                name=body_top_name + '_subsampled_2x')
            fpn_dict[body_top_name + '_subsampled_2x'] = body_top_extension
            fpn_name_list.insert(0, body_top_name + '_subsampled_2x')
            spatial_scale.insert(0, spatial_scale[0] * 0.5)
        # Coarser FPN levels introduced for RetinaNet
        highest_backbone_level = self.min_level + len(spatial_scale) - 1
        if self.has_extra_convs and self.max_level > highest_backbone_level:
            fpn_blob = body_dict[body_name_list[0]]
            for i in range(highest_backbone_level + 1, self.max_level + 1):
                fpn_blob_in = fpn_blob
                fpn_name = 'fpn_' + str(i)
                if i > highest_backbone_level + 1:
                    fpn_blob_in = fluid.layers.relu(fpn_blob)
                fan = fpn_blob_in.shape[1] * 3 * 3
                fpn_blob = fluid.layers.conv2d(
                    input=fpn_blob_in,
                    num_filters=self.num_chan,
                    filter_size=3,
                    stride=2,
                    padding=1,
                    param_attr=ParamAttr(
                        name=fpn_name + "_w", initializer=Xavier(fan_out=fan)),
                    bias_attr=ParamAttr(
                        name=fpn_name + "_b",
                        learning_rate=2.,
                        regularizer=L2Decay(0.)),
                    name=fpn_name)
                fpn_dict[fpn_name] = fpn_blob
                fpn_name_list.insert(0, fpn_name)
                spatial_scale.insert(0, spatial_scale[0] * 0.5)
        res_dict = OrderedDict([(k, fpn_dict[k]) for k in fpn_name_list])
        return res_dict, spatial_scale
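
# Usage sketch (illustrative; the ResNet stage names and shapes below are
# assumptions about the body_dict a backbone would provide):
#
#   body_dict = OrderedDict([
#       ('res2_sum', fluid.data('res2_sum', [None, 256, 200, 200], 'float32')),
#       ('res3_sum', fluid.data('res3_sum', [None, 512, 100, 100], 'float32')),
#       ('res4_sum', fluid.data('res4_sum', [None, 1024, 50, 50], 'float32')),
#       ('res5_sum', fluid.data('res5_sum', [None, 2048, 25, 25], 'float32')),
#   ])
#   fpn = FPN(num_chan=256, min_level=2, max_level=6)
#   fpn_dict, spatial_scale = fpn.get_output(body_dict)
#   # fpn_dict is ordered coarsest level first; with the defaults an extra
#   # max-pooled level is prepended and spatial_scale gains a 1/64 entry.
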
class HRFPN(object):
    """
    HRFPN, the feature pyramid used with HRNet, see https://arxiv.org/abs/1908.07919

    Args:
        num_chan (int): number of feature channels
        pooling_type (str): pooling type of downsampling
        share_conv (bool): whether to share conv for different layers' reduction
        spatial_scale (list): feature map scaling factor
    """
    def __init__(self,
                 num_chan=256,
                 pooling_type="avg",
                 share_conv=False,
                 spatial_scale=[1. / 64, 1. / 32, 1. / 16, 1. / 8, 1. / 4]):
        self.num_chan = num_chan
        self.pooling_type = pooling_type
        self.share_conv = share_conv
        self.spatial_scale = spatial_scale
    def get_output(self, body_dict):
        num_out = len(self.spatial_scale)
        body_name_list = list(body_dict.keys())
        num_backbone_stages = len(body_name_list)

        outs = []
        outs.append(body_dict[body_name_list[0]])

        # Resize every branch to the resolution of the highest-resolution branch.
        for i in range(1, len(body_dict)):
            resized = self.resize_input_tensor(body_dict[body_name_list[i]],
                                               outs[0], 2**i)
            outs.append(resized)

        # Concatenate all branches along the channel dimension.
        out = fluid.layers.concat(outs, axis=1)

        # 1x1 conv reduces the concatenated features to num_chan channels.
        out = fluid.layers.conv2d(
            input=out,
            num_filters=self.num_chan,
            filter_size=1,
            stride=1,
            padding=0,
            param_attr=ParamAttr(name='hrfpn_reduction_weights'),
            bias_attr=False)

        # Build the pyramid by pooling the reduced feature at increasing strides.
        outs = [out]
        for i in range(1, num_out):
            outs.append(
                self.pooling(
                    out,
                    size=2**i,
                    stride=2**i,
                    pooling_type=self.pooling_type))

        # A 3x3 conv per level (optionally shared) produces the final outputs.
        outputs = []
        for i in range(num_out):
            conv_name = "shared_fpn_conv" if self.share_conv else "shared_fpn_conv_" + str(
                i)
            conv = fluid.layers.conv2d(
                input=outs[i],
                num_filters=self.num_chan,
                filter_size=3,
                stride=1,
                padding=1,
                param_attr=ParamAttr(name=conv_name + "_weights"),
                bias_attr=False)
            outputs.append(conv)

        # Name the extra pooled levels after the coarsest backbone output.
        for idx in range(0, num_out - len(body_name_list)):
            body_name_list.append("fpn_res5_sum_subsampled_{}x".format(2**(
                idx + 1)))

        outputs = outputs[::-1]
        body_name_list = body_name_list[::-1]

        res_dict = OrderedDict([(body_name_list[k], outputs[k])
                                for k in range(len(body_name_list))])
        return res_dict, self.spatial_scale
    def resize_input_tensor(self, body_input, ref_output, scale):
        shape = fluid.layers.shape(ref_output)
        shape_hw = fluid.layers.slice(shape, axes=[0], starts=[2], ends=[4])
        out_shape_ = shape_hw
        out_shape = fluid.layers.cast(out_shape_, dtype='int32')
        out_shape.stop_gradient = True
        body_output = fluid.layers.resize_bilinear(
            body_input, scale=scale, out_shape=out_shape)
        return body_output

    def pooling(self, input, size, stride, pooling_type):
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=size,
            pool_stride=stride,
            pool_type=pooling_type)
        return pool
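
# Usage sketch (illustrative; the HRNet branch names, channel counts and shapes
# below are assumptions about the body_dict a backbone would provide):
#
#   body_dict = OrderedDict([
#       ('hrnet_branch0', fluid.data('b0', [None, 18, 200, 200], 'float32')),
#       ('hrnet_branch1', fluid.data('b1', [None, 36, 100, 100], 'float32')),
#       ('hrnet_branch2', fluid.data('b2', [None, 72, 50, 50], 'float32')),
#       ('hrnet_branch3', fluid.data('b3', [None, 144, 25, 25], 'float32')),
#   ])
#   hrfpn = HRFPN(num_chan=256, pooling_type='avg')
#   res_dict, spatial_scale = hrfpn.get_output(body_dict)
#   # With the default 5-entry spatial_scale and 4 backbone branches, one extra
#   # pooled level is produced and res_dict is returned coarsest level first.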