hrnet.py

# coding: utf8
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import OrderedDict

import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr

from .model_utils.libs import sigmoid_to_softmax
from .model_utils.loss import softmax_with_loss
from .model_utils.loss import dice_loss
from .model_utils.loss import bce_loss
import paddlex
import paddlex.utils.logging as logging


class HRNet(object):
    def __init__(self,
                 num_classes,
                 mode='train',
                 width=18,
                 use_bce_loss=False,
                 use_dice_loss=False,
                 class_weight=None,
                 ignore_index=255,
                 fixed_input_shape=None):
        # dice_loss and bce_loss are only applicable to two-class segmentation
        if num_classes > 2 and (use_bce_loss or use_dice_loss):
            raise ValueError(
                "dice loss and bce loss are only applicable to binary classification"
            )
        if class_weight is not None:
            if isinstance(class_weight, list):
                if len(class_weight) != num_classes:
                    raise ValueError(
                        "Length of class_weight should be equal to number of classes"
                    )
            elif isinstance(class_weight, str):
                if class_weight.lower() != 'dynamic':
                    raise ValueError(
                        "if class_weight is a string, it must be 'dynamic'!")
            else:
                raise TypeError(
                    'Expect class_weight is a list or string but receive {}'.
                    format(type(class_weight)))
        self.num_classes = num_classes
        self.mode = mode
        self.use_bce_loss = use_bce_loss
        self.use_dice_loss = use_dice_loss
        self.class_weight = class_weight
        self.ignore_index = ignore_index
        self.fixed_input_shape = fixed_input_shape
        # HRNet backbone returning the four stage-4 branch feature maps
        self.backbone = paddlex.cv.nets.hrnet.HRNet(
            width=width, feature_maps="stage4")

    def build_net(self, inputs):
        # dice_loss and bce_loss both operate on a single-channel logit
        if self.use_dice_loss or self.use_bce_loss:
            self.num_classes = 1
        image = inputs['image']
        st4 = self.backbone(image)
        # upsample the three lower-resolution branches to the resolution of
        # the first branch, then concatenate along the channel axis
        shape = fluid.layers.shape(st4[0])[-2:]
        st4[1] = fluid.layers.resize_bilinear(st4[1], out_shape=shape)
        st4[2] = fluid.layers.resize_bilinear(st4[2], out_shape=shape)
        st4[3] = fluid.layers.resize_bilinear(st4[3], out_shape=shape)
        out = fluid.layers.concat(st4, axis=1)
        last_channels = sum(self.backbone.channels[str(self.backbone.width)][
            -1])
        out = self._conv_bn_layer(
            input=out,
            filter_size=1,
            num_filters=last_channels,
            stride=1,
            if_act=True,
            name='conv-2')
        # 1x1 convolution mapping the fused features to one channel per class
        out = fluid.layers.conv2d(
            input=out,
            num_filters=self.num_classes,
            filter_size=1,
            stride=1,
            padding=0,
            act=None,
            param_attr=ParamAttr(
                initializer=MSRA(), name='conv-1_weights'),
            bias_attr=False)
        # restore the logits to the input resolution
        input_shape = fluid.layers.shape(image)[-2:]
        logit = fluid.layers.resize_bilinear(out, input_shape)

        if self.num_classes == 1:
            out = sigmoid_to_softmax(logit)
            out = fluid.layers.transpose(out, [0, 2, 3, 1])
        else:
            out = fluid.layers.transpose(logit, [0, 2, 3, 1])
        pred = fluid.layers.argmax(out, axis=3)
        pred = fluid.layers.unsqueeze(pred, axes=[3])

        if self.mode == 'train':
            label = inputs['label']
            mask = label != self.ignore_index
            return self._get_loss(logit, label, mask)
        elif self.mode == 'eval':
            label = inputs['label']
            mask = label != self.ignore_index
            loss = self._get_loss(logit, label, mask)
            return loss, pred, label, mask
        else:
            if self.num_classes == 1:
                logit = sigmoid_to_softmax(logit)
            else:
                logit = fluid.layers.softmax(logit, axis=1)
            return pred, logit

    def generate_inputs(self):
        inputs = OrderedDict()
        if self.fixed_input_shape is not None:
            # fixed_input_shape is given as [width, height]; the network
            # consumes NCHW tensors
            input_shape = [
                None, 3, self.fixed_input_shape[1], self.fixed_input_shape[0]
            ]
            inputs['image'] = fluid.data(
                dtype='float32', shape=input_shape, name='image')
        else:
            inputs['image'] = fluid.data(
                dtype='float32', shape=[None, 3, None, None], name='image')
        if self.mode == 'train':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        elif self.mode == 'eval':
            inputs['label'] = fluid.data(
                dtype='int32', shape=[None, 1, None, None], name='label')
        return inputs

    def _get_loss(self, logit, label, mask):
        avg_loss = 0
        if not (self.use_dice_loss or self.use_bce_loss):
            avg_loss += softmax_with_loss(
                logit,
                label,
                mask,
                num_classes=self.num_classes,
                weight=self.class_weight,
                ignore_index=self.ignore_index)
        else:
            if self.use_dice_loss:
                avg_loss += dice_loss(logit, label, mask)
            if self.use_bce_loss:
                avg_loss += bce_loss(
                    logit, label, mask, ignore_index=self.ignore_index)
        return avg_loss

    def _conv_bn_layer(self,
                       input,
                       filter_size,
                       num_filters,
                       stride=1,
                       padding=1,
                       num_groups=1,
                       if_act=True,
                       name=None):
        # note: the conv padding is derived from filter_size below; the
        # `padding` argument itself is not used
        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=num_groups,
            act=None,
            param_attr=ParamAttr(
                initializer=MSRA(), name=name + '_weights'),
            bias_attr=False)
        bn_name = name + '_bn'
        bn = fluid.layers.batch_norm(
            input=conv,
            param_attr=ParamAttr(
                name=bn_name + "_scale",
                initializer=fluid.initializer.Constant(1.0)),
            bias_attr=ParamAttr(
                name=bn_name + "_offset",
                initializer=fluid.initializer.Constant(0.0)),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')
        if if_act:
            bn = fluid.layers.relu(bn)
        return bn
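

# Minimal usage sketch, assuming the static-graph paddle.fluid 1.x API used
# throughout this file: generate_inputs() declares the feed placeholders and
# build_net() assembles the network, returning the loss in 'train' mode.
# The shapes, learning rate, and random data below are arbitrary example
# values, and because this module relies on relative imports the sketch only
# runs inside the paddlex package context.
if __name__ == '__main__':
    import numpy as np

    model = HRNet(num_classes=2, mode='train', width=18)
    startup_prog = fluid.Program()
    train_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        inputs = model.generate_inputs()  # OrderedDict: 'image', 'label'
        loss = model.build_net(inputs)  # average loss in 'train' mode
        fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    # one training step on random data, feeding by the fluid.data names
    feed = {
        'image': np.random.rand(1, 3, 256, 256).astype('float32'),
        'label': np.random.randint(0, 2, (1, 1, 256, 256)).astype('int32'),
    }
    print(exe.run(train_prog, feed=feed, fetch_list=[loss])[0])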