# metrics.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. from __future__ import absolute_import
  15. from __future__ import division
  16. from __future__ import print_function
  17. from sklearn.metrics import hamming_loss
  18. from sklearn.metrics import accuracy_score as accuracy_metric
  19. from sklearn.metrics import multilabel_confusion_matrix
  20. from sklearn.metrics import precision_recall_fscore_support
  21. from sklearn.metrics import average_precision_score
  22. from sklearn.preprocessing import binarize
  23. import numpy as np
  24. __all__ = [
  25. "multi_hot_encode", "hamming_distance", "accuracy_score",
  26. "precision_recall_fscore", "mean_average_precision"
  27. ]
  28. def multi_hot_encode(logits, threshold=0.5):
  29. """
  30. Encode logits to multi-hot by elementwise for multilabel
  31. """
  32. return binarize(logits, threshold=threshold)
  33. def hamming_distance(output, target):
  34. """
  35. Soft metric based label for multilabel classification
  36. Returns:
  37. The smaller the return value is, the better model is.
  38. """
  39. return hamming_loss(target, output)
  40. def accuracy_score(output, target, base="sample"):
  41. """
  42. Hard metric for multilabel classification
  43. Args:
  44. output:
  45. target:
  46. base: ["sample", "label"], default="sample"
  47. if "sample", return metric score based sample,
  48. if "label", return metric score based label.
  49. Returns:
  50. accuracy:
  51. """
  52. assert base in ["sample", "label"], 'must be one of ["sample", "label"]'
  53. if base == "sample":
  54. accuracy = accuracy_metric(target, output)
  55. elif base == "label":
  56. mcm = multilabel_confusion_matrix(target, output)
  57. tns = mcm[:, 0, 0]
  58. fns = mcm[:, 1, 0]
  59. tps = mcm[:, 1, 1]
  60. fps = mcm[:, 0, 1]
  61. accuracy = (sum(tps) + sum(tns)) / (
  62. sum(tps) + sum(tns) + sum(fns) + sum(fps))
  63. return accuracy
  64. def precision_recall_fscore(output, target):
  65. """
  66. Metric based label for multilabel classification
  67. Returns:
  68. precisions:
  69. recalls:
  70. fscores:
  71. """
  72. precisions, recalls, fscores, _ = precision_recall_fscore_support(target,
  73. output)
  74. return precisions, recalls, fscores
  75. def mean_average_precision(logits, target):
  76. """
  77. Calculate average precision
  78. Args:
  79. logits: probability from network before sigmoid or softmax
  80. target: ground truth, 0 or 1
  81. """
  82. if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)):
  83. raise TypeError("logits and target should be np.ndarray.")
  84. aps = []
  85. for i in range(target.shape[1]):
  86. ap = average_precision_score(target[:, i], logits[:, i])
  87. aps.append(ap)
  88. return np.mean(aps)