loss.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import numpy as np


def softmax_with_loss(logit,
                      label,
                      ignore_mask=None,
                      num_classes=2,
                      weight=None,
                      ignore_index=255):
    """Softmax cross-entropy loss with optional per-class weighting.

    `logit` is an NCHW score map, `label` an integer label map and
    `ignore_mask` a same-sized map that is 0 at pixels to ignore and 1
    elsewhere (despite the None default, ignore_mask must be provided).
    `weight` may be a list of per-class weights, the string 'dynamic'
    (weights derived from class pixel frequency), or a Variable.
    """
    ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
    # Clip labels to at most num_classes - 1.
    label = fluid.layers.elementwise_min(
        label,
        fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
    # Flatten NCHW logits to [N*H*W, num_classes], labels and mask to
    # [N*H*W, 1].
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    logit = fluid.layers.reshape(logit, [-1, num_classes])
    label = fluid.layers.reshape(label, [-1, 1])
    label = fluid.layers.cast(label, 'int64')
    ignore_mask = fluid.layers.reshape(ignore_mask, [-1, 1])
    if weight is None:
        loss, probs = fluid.layers.softmax_with_cross_entropy(
            logit, label, ignore_index=ignore_index, return_softmax=True)
    else:
        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
        if isinstance(weight, list):
            assert len(weight) == num_classes, \
                "weight length must equal num of classes"
            weight = fluid.layers.assign(np.array([weight], dtype='float32'))
        elif isinstance(weight, str):
            assert weight.lower() == 'dynamic', \
                'if weight is string, must be dynamic!'
            # Weight each class by the inverse of its pixel frequency.
            tmp = []
            total_num = fluid.layers.cast(
                fluid.layers.shape(label)[0], 'float32')
            for i in range(num_classes):
                cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                ratio = total_num / (cls_pixel_num + 1)
                tmp.append(ratio)
            weight = fluid.layers.concat(tmp)
            # Normalize so the weights sum to num_classes.
            weight = weight / fluid.layers.reduce_sum(weight) * num_classes
        elif isinstance(weight, fluid.layers.Variable):
            pass
        else:
            raise ValueError(
                'Expect weight is a list, string or Variable, but receive {}'.
                format(type(weight)))
        weight = fluid.layers.reshape(weight, [1, num_classes])
        weighted_label_one_hot = fluid.layers.elementwise_mul(
            label_one_hot, weight)
        probs = fluid.layers.softmax(logit)
        loss = fluid.layers.cross_entropy(
            probs,
            weighted_label_one_hot,
            soft_label=True,
            ignore_index=ignore_index)
        weighted_label_one_hot.stop_gradient = True

    # Zero out ignored pixels and average over the valid ones only.
    loss = loss * ignore_mask
    avg_loss = fluid.layers.mean(loss) / (
        fluid.layers.mean(ignore_mask) + 0.00001)

    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return avg_loss
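
# Usage sketch (illustrative only, not part of the original file): how this
# loss might be wired into a fluid program. The variable names, shapes and
# data layers below are assumptions for demonstration.
#
#   logit = fluid.layers.data(name='logit', shape=[2, 64, 64], dtype='float32')
#   label = fluid.layers.data(name='label', shape=[1, 64, 64], dtype='int32')
#   mask = fluid.layers.data(name='mask', shape=[1, 64, 64], dtype='int32')
#   avg_loss = softmax_with_loss(logit, label, mask, num_classes=2)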


# TODO: decide how ignore_index and ignore_mask should be applied here.
def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
    """Dice loss for one-channel (binary) segmentation.

    Computes 1 - 2 * sum(P * G) / (sum(P) + sum(G) + epsilon) per sample,
    where P is the sigmoid probability map and G the ground-truth map,
    both zeroed at ignored pixels, then averages over the batch.
    """
    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
        raise Exception(
            "dice loss is only applicable to one-channel classification")
    ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    label = fluid.layers.transpose(label, [0, 2, 3, 1])
    # Cast the label to float32 so it can be multiplied with the float
    # mask and probabilities (an int64 cast would break elementwise_mul).
    label = fluid.layers.cast(label, 'float32')
    ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
    logit = fluid.layers.sigmoid(logit)
    logit = logit * ignore_mask
    label = label * ignore_mask
    # Reduce over every dimension except the batch dimension.
    reduce_dim = list(range(1, len(logit.shape)))
    inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
    dice_denominator = fluid.layers.reduce_sum(
        logit, dim=reduce_dim) + fluid.layers.reduce_sum(
            label, dim=reduce_dim)
    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return fluid.layers.reduce_mean(dice_score)
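
# Usage sketch (illustrative; assumptions as in the softmax_with_loss sketch
# above, except that logit, label and mask must all be one-channel, e.g.
# shape=[1, 64, 64]):
#
#   loss = dice_loss(logit, label, mask)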


def bce_loss(logit, label, ignore_mask=None, ignore_index=255):
    """Sigmoid binary cross-entropy loss for one-channel segmentation.

    Pixels whose label equals ignore_index contribute zero loss.
    """
    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
        raise Exception("bce loss is only applicable to binary classification")
    label = fluid.layers.cast(label, 'float32')
    loss = fluid.layers.sigmoid_cross_entropy_with_logits(
        x=logit,
        label=label,
        ignore_index=ignore_index,
        # normalize=True divides by the number of non-ignored pixels;
        # set it to False to keep the raw per-pixel sum.
        normalize=True)
    loss = fluid.layers.reduce_sum(loss)
    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return loss
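
# Usage sketch (illustrative; same one-channel inputs as the dice_loss
# sketch above):
#
#   loss = bce_loss(logit, label, mask, ignore_index=255)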