# resnet.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
from collections import OrderedDict
from numbers import Integral

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.framework import Variable
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.initializer import Constant

from .backbone_utils import NameAdapter
# `add_space_nonlocal` and `add_gc_block` are used below but were not imported
# in the original file; the module paths here follow the PaddleDetection layout
# and may need adjusting to this package's actual structure.
from .nonlocal_helper import add_space_nonlocal
from .gc_block import add_gc_block

__all__ = ['ResNet', 'ResNetC5']


class ResNet(object):
    """
    Residual Network, see https://arxiv.org/abs/1512.03385

    Args:
        layers (int): ResNet depth, one of 18, 34, 50, 101, 152, 200.
        freeze_at (int): freeze the backbone at which stage (0 means no
            freezing).
        norm_type (str): normalization type, 'bn', 'sync_bn' or
            'affine_channel'.
        freeze_norm (bool): freeze normalization layers.
        norm_decay (float): weight decay for normalization layer weights.
        variant (str): ResNet variant, supports 'a', 'b', 'c' and 'd'
            currently.
        feature_maps (list): indices of stages whose feature maps are returned.
        dcn_v2_stages (list): indices of stages that use deformable conv v2.
        weight_prefix_name (str): prefix prepended to all weight names.
        nonlocal_stages (list): indices of stages that use non-local blocks.
        gcb_stages (list): indices of stages that use global context (GC)
            blocks.
        gcb_params (dict): GC block config; includes ratio (default 1.0/16),
            pooling_type (default "att") and fusion_types
            (default ['channel_add']).
        num_classes (int): if set, append global average pooling and an fc
            layer of this size for classification.
        lr_mult_list (list): learning rate multipliers, one per stage
            (5 values).
    """
    def __init__(self,
                 layers=50,
                 freeze_at=0,
                 norm_type='bn',
                 freeze_norm=False,
                 norm_decay=0.,
                 variant='b',
                 feature_maps=[2, 3, 4, 5],
                 dcn_v2_stages=[],
                 weight_prefix_name='',
                 nonlocal_stages=[],
                 gcb_stages=[],
                 gcb_params=dict(),
                 num_classes=None,
                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]):
        super(ResNet, self).__init__()

        if isinstance(feature_maps, Integral):
            feature_maps = [feature_maps]

        assert layers in [18, 34, 50, 101, 152, 200], \
            "layers {} not in [18, 34, 50, 101, 152, 200]".format(layers)
        assert variant in ['a', 'b', 'c', 'd'], "invalid ResNet variant"
        assert 0 <= freeze_at <= 5, "freeze_at should be 0, 1, 2, 3, 4 or 5"
        assert len(feature_maps) > 0, "need one or more feature maps"
        assert norm_type in ['bn', 'sync_bn', 'affine_channel']
        assert not (len(nonlocal_stages) > 0 and layers < 50), \
            "non-local is not supported for resnet18 or resnet34"
        assert len(lr_mult_list) == 5, \
            "lr_mult_list length in ResNet must be 5 but got {}!".format(
                len(lr_mult_list))

        self.layers = layers
        self.freeze_at = freeze_at
        self.norm_type = norm_type
        self.norm_decay = norm_decay
        self.freeze_norm = freeze_norm
        self.variant = variant
        self._model_type = 'ResNet'
        self.feature_maps = feature_maps
        self.dcn_v2_stages = dcn_v2_stages
        # (block counts per stage, block function) for each supported depth
        self.layers_cfg = {
            18: ([2, 2, 2, 2], self.basicblock),
            34: ([3, 4, 6, 3], self.basicblock),
            50: ([3, 4, 6, 3], self.bottleneck),
            101: ([3, 4, 23, 3], self.bottleneck),
            152: ([3, 8, 36, 3], self.bottleneck),
            200: ([3, 12, 48, 3], self.bottleneck),
        }
        self.stage_filters = [64, 128, 256, 512]
        self._c1_out_chan_num = 64
        self.na = NameAdapter(self)
        self.prefix_name = weight_prefix_name

        self.nonlocal_stages = nonlocal_stages
        # interval at which non-local blocks are inserted in stage 4
        self.nonlocal_mod_cfg = {
            50: 2,
            101: 5,
            152: 8,
            200: 12,
        }

        self.gcb_stages = gcb_stages
        self.gcb_params = gcb_params
        self.num_classes = num_classes
        self.lr_mult_list = lr_mult_list
        # stage counter used to index lr_mult_list
        self.curr_stage = 0
    def _conv_offset(self,
                     input,
                     filter_size,
                     stride,
                     padding,
                     act=None,
                     name=None):
        # Predicts offsets and mask for deformable conv v2:
        # 2 offset channels plus 1 mask channel per kernel position.
        out_channel = filter_size * filter_size * 3
        out = fluid.layers.conv2d(
            input,
            num_filters=out_channel,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            param_attr=ParamAttr(
                initializer=Constant(0.0), name=name + ".w_0"),
            bias_attr=ParamAttr(
                initializer=Constant(0.0), name=name + ".b_0"),
            act=act,
            name=name)
        return out
    def _conv_norm(self,
                   input,
                   num_filters,
                   filter_size,
                   stride=1,
                   groups=1,
                   act=None,
                   name=None,
                   dcn_v2=False,
                   use_lr_mult_list=False):
        lr_mult = self.lr_mult_list[
            self.curr_stage] if use_lr_mult_list else 1.0
        _name = self.prefix_name + name if self.prefix_name != '' else name
        if not dcn_v2:
            conv = fluid.layers.conv2d(
                input=input,
                num_filters=num_filters,
                filter_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                act=None,
                param_attr=ParamAttr(
                    name=_name + "_weights", learning_rate=lr_mult),
                bias_attr=False,
                name=_name + '.conv2d.output.1')
        else:
            # deformable conv v2: predict offsets and mask, then apply them
            offset_mask = self._conv_offset(
                input=input,
                filter_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                act=None,
                name=_name + "_conv_offset")
            offset_channel = filter_size**2 * 2
            mask_channel = filter_size**2
            offset, mask = fluid.layers.split(
                input=offset_mask,
                num_or_sections=[offset_channel, mask_channel],
                dim=1)
            mask = fluid.layers.sigmoid(mask)
            conv = fluid.layers.deformable_conv(
                input=input,
                offset=offset,
                mask=mask,
                num_filters=num_filters,
                filter_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                deformable_groups=1,
                im2col_step=1,
                param_attr=ParamAttr(name=_name + "_weights"),
                bias_attr=False,
                name=_name + ".conv2d.output.1")

        bn_name = self.na.fix_conv_norm_name(name)
        bn_name = self.prefix_name + bn_name if self.prefix_name != '' else bn_name

        norm_lr = 0. if self.freeze_norm else lr_mult
        norm_decay = self.norm_decay
        if self.num_classes:
            regularizer = None
        else:
            regularizer = L2Decay(norm_decay)
        pattr = ParamAttr(
            name=bn_name + '_scale',
            learning_rate=norm_lr,
            regularizer=regularizer)
        battr = ParamAttr(
            name=bn_name + '_offset',
            learning_rate=norm_lr,
            regularizer=regularizer)

        if self.norm_type in ['bn', 'sync_bn']:
            global_stats = True if self.freeze_norm else False
            out = fluid.layers.batch_norm(
                input=conv,
                act=act,
                name=bn_name + '.output.1',
                param_attr=pattr,
                bias_attr=battr,
                moving_mean_name=bn_name + '_mean',
                moving_variance_name=bn_name + '_variance',
                use_global_stats=global_stats)
            scale = fluid.framework._get_var(pattr.name)
            bias = fluid.framework._get_var(battr.name)
        elif self.norm_type == 'affine_channel':
            scale = fluid.layers.create_parameter(
                shape=[conv.shape[1]],
                dtype=conv.dtype,
                attr=pattr,
                default_initializer=fluid.initializer.Constant(1.))
            bias = fluid.layers.create_parameter(
                shape=[conv.shape[1]],
                dtype=conv.dtype,
                attr=battr,
                default_initializer=fluid.initializer.Constant(0.))
            out = fluid.layers.affine_channel(
                x=conv, scale=scale, bias=bias, act=act)
        if self.freeze_norm:
            scale.stop_gradient = True
            bias.stop_gradient = True
        return out
    def _shortcut(self, input, ch_out, stride, is_first, name):
        max_pooling_in_short_cut = self.variant == 'd'
        ch_in = input.shape[1]
        # the naming rule is the same as in the pretrained weights
        name = self.na.fix_shortcut_name(name)
        std_senet = getattr(self, 'std_senet', False)
        if ch_in != ch_out or stride != 1 or (self.layers < 50 and is_first):
            if std_senet:
                if is_first:
                    return self._conv_norm(input, ch_out, 1, stride, name=name)
                else:
                    return self._conv_norm(input, ch_out, 3, stride, name=name)
            if max_pooling_in_short_cut and not is_first:
                # ResNet-D: average pool then 1x1 conv in the shortcut branch
                input = fluid.layers.pool2d(
                    input=input,
                    pool_size=2,
                    pool_stride=2,
                    pool_padding=0,
                    ceil_mode=True,
                    pool_type='avg')
                return self._conv_norm(
                    input, ch_out, 1, 1, name=name, use_lr_mult_list=True)
            return self._conv_norm(input, ch_out, 1, stride, name=name)
        else:
            return input
    def bottleneck(self,
                   input,
                   num_filters,
                   stride,
                   is_first,
                   name,
                   dcn_v2=False,
                   gcb=False,
                   gcb_name=None):
        if self.variant == 'a':
            stride1, stride2 = stride, 1
        else:
            stride1, stride2 = 1, stride

        # ResNeXt
        groups = getattr(self, 'groups', 1)
        group_width = getattr(self, 'group_width', -1)
        if groups == 1:
            expand = 4
        elif (groups * group_width) == 256:
            expand = 1
        else:  # FIXME hard code for now, handles 32x4d, 64x4d and 32x8d
            num_filters = num_filters // 2
            expand = 2

        conv_name1, conv_name2, conv_name3, \
            shortcut_name = self.na.fix_bottleneck_name(name)
        std_senet = getattr(self, 'std_senet', False)
        if std_senet:
            conv_def = [
                [int(num_filters / 2), 1, stride1, 'relu', 1, conv_name1],
                [num_filters, 3, stride2, 'relu', groups, conv_name2],
                [num_filters * expand, 1, 1, None, 1, conv_name3]
            ]
        else:
            conv_def = [
                [num_filters, 1, stride1, 'relu', 1, conv_name1],
                [num_filters, 3, stride2, 'relu', groups, conv_name2],
                [num_filters * expand, 1, 1, None, 1, conv_name3]
            ]

        residual = input
        for i, (c, k, s, act, g, _name) in enumerate(conv_def):
            residual = self._conv_norm(
                input=residual,
                num_filters=c,
                filter_size=k,
                stride=s,
                act=act,
                groups=g,
                name=_name,
                dcn_v2=(i == 1 and dcn_v2))
        short = self._shortcut(
            input,
            num_filters * expand,
            stride,
            is_first=is_first,
            name=shortcut_name)
        # Squeeze-and-Excitation
        if callable(getattr(self, '_squeeze_excitation', None)):
            residual = self._squeeze_excitation(
                input=residual, num_channels=num_filters, name='fc' + name)
        if gcb:
            residual = add_gc_block(residual, name=gcb_name, **self.gcb_params)
        return fluid.layers.elementwise_add(
            x=short, y=residual, act='relu', name=name + ".add.output.5")
    def basicblock(self,
                   input,
                   num_filters,
                   stride,
                   is_first,
                   name,
                   dcn_v2=False,
                   gcb=False,
                   gcb_name=None):
        assert dcn_v2 is False, "Not implemented yet."
        assert gcb is False, "Not implemented yet."
        conv0 = self._conv_norm(
            input=input,
            num_filters=num_filters,
            filter_size=3,
            act='relu',
            stride=stride,
            name=name + "_branch2a")
        conv1 = self._conv_norm(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")
        short = self._shortcut(
            input, num_filters, stride, is_first, name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
    def layer_warp(self, input, stage_num):
        """
        Args:
            input (Variable): input variable.
            stage_num (int): the stage number, should be 2, 3, 4 or 5.

        Returns:
            The output variable of the last block in the stage.
        """
        assert stage_num in [2, 3, 4, 5]
        stages, block_func = self.layers_cfg[self.layers]
        count = stages[stage_num - 2]
        ch_out = self.stage_filters[stage_num - 2]
        is_first = False if stage_num != 2 else True
        dcn_v2 = True if stage_num in self.dcn_v2_stages else False

        nonlocal_mod = 1000
        if stage_num in self.nonlocal_stages:
            nonlocal_mod = self.nonlocal_mod_cfg[
                self.layers] if stage_num == 4 else 2

        # Make the layer name and parameter name consistent
        # with the ImageNet pre-trained model.
        conv = input
        for i in range(count):
            conv_name = self.na.fix_layer_warp_name(stage_num, count, i)
            if self.layers < 50:
                is_first = True if i == 0 and stage_num == 2 else False
            gcb = stage_num in self.gcb_stages
            gcb_name = "gcb_res{}_b{}".format(stage_num, i)
            conv = block_func(
                input=conv,
                num_filters=ch_out,
                stride=2 if i == 0 and stage_num != 2 else 1,
                is_first=is_first,
                name=conv_name,
                dcn_v2=dcn_v2,
                gcb=gcb,
                gcb_name=gcb_name)

            # add non-local block
            dim_in = conv.shape[1]
            nonlocal_name = "nonlocal_conv{}".format(stage_num)
            if i % nonlocal_mod == nonlocal_mod - 1:
                conv = add_space_nonlocal(conv, dim_in, dim_in,
                                          nonlocal_name + '_{}'.format(i),
                                          int(dim_in / 2))
        return conv
    def c1_stage(self, input):
        out_chan = self._c1_out_chan_num
        conv1_name = self.na.fix_c1_stage_name()
        if self.variant in ['c', 'd']:
            # variants 'c'/'d' replace the 7x7 stem conv with three 3x3 convs
            conv_def = [
                [out_chan // 2, 3, 2, "conv1_1"],
                [out_chan // 2, 3, 1, "conv1_2"],
                [out_chan, 3, 1, "conv1_3"],
            ]
        else:
            conv_def = [[out_chan, 7, 2, conv1_name]]

        for (c, k, s, _name) in conv_def:
            input = self._conv_norm(
                input=input,
                num_filters=c,
                filter_size=k,
                stride=s,
                act='relu',
                name=_name)

        output = fluid.layers.pool2d(
            input=input,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')
        return output
    def __call__(self, input):
        assert isinstance(input, Variable)
        assert not (set(self.feature_maps) - set([1, 2, 3, 4, 5])), \
            "feature maps {} not in [1, 2, 3, 4, 5]".format(self.feature_maps)

        res_endpoints = []

        res = input
        feature_maps = self.feature_maps
        severed_head = getattr(self, 'severed_head', False)
        if not severed_head:
            res = self.c1_stage(res)
            feature_maps = range(2, max(self.feature_maps) + 1)

        for i in feature_maps:
            self.curr_stage += 1
            res = self.layer_warp(res, i)
            if i in self.feature_maps:
                res_endpoints.append(res)
            if self.freeze_at >= i:
                res.stop_gradient = True

        if self.num_classes is not None:
            pool = fluid.layers.pool2d(
                input=res, pool_type='avg', global_pooling=True)
            stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
            out = fluid.layers.fc(
                input=pool,
                size=self.num_classes,
                param_attr=fluid.param_attr.ParamAttr(
                    initializer=fluid.initializer.Uniform(-stdv, stdv)))
            return out

        return OrderedDict([('res{}_sum'.format(self.feature_maps[idx]), feat)
                            for idx, feat in enumerate(res_endpoints)])

class ResNetC5(ResNet):
    __doc__ = ResNet.__doc__

    def __init__(self,
                 layers=50,
                 freeze_at=2,
                 norm_type='affine_channel',
                 freeze_norm=True,
                 norm_decay=0.,
                 variant='b',
                 feature_maps=[5],
                 weight_prefix_name=''):
        super(ResNetC5, self).__init__(
            layers,
            freeze_at,
            norm_type,
            freeze_norm,
            norm_decay,
            variant,
            feature_maps,
            weight_prefix_name=weight_prefix_name)
        self.severed_head = True
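
# A minimal usage sketch (assumes the Paddle 1.x static-graph API used above
# and that this module is imported from its package; the input name and shape
# are illustrative, not part of this module):
#
#   import paddle.fluid as fluid
#   image = fluid.data(
#       name='image', shape=[None, 3, 224, 224], dtype='float32')
#   backbone = ResNet(layers=50, feature_maps=[3, 4, 5])
#   body_feats = backbone(image)  # OrderedDict: 'res3_sum' ... 'res5_sum'
#
#   # With num_classes set, __call__ instead returns classification logits:
#   # clf = ResNet(layers=50, num_classes=1000)
#   # logits = clf(image)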