visualize.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import copy
import os.path as osp

import cv2
import numpy as np

from .core.explanation import Explanation
from .core.normlime_base import precompute_normlime_weights
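

# LIME / NormLIME visualization for PaddleX classification models: `visualize`
# preprocesses a single image, builds the matching explainer (see the helper
# functions below) and writes the explanation figures to `save_dir`. The
# transform slicing throughout this module assumes that the last two entries
# of `model.test_transforms` are the normalization/arrange steps appended for
# classification.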


def visualize(img_file,
              model,
              dataset=None,
              explanation_type='lime',
              num_samples=3000,
              batch_size=50,
              save_dir='./'):
    if model.status != 'Normal':
        raise Exception(
            'The explanation only supports a model whose status is Normal.')
    model.arrange_transforms(
        transforms=model.test_transforms, mode='test')
    # Drop the last two test transforms so the explainer works on the
    # un-normalized image, then round it to uint8 and add a batch dimension.
    tmp_transforms = copy.deepcopy(model.test_transforms)
    tmp_transforms.transforms = tmp_transforms.transforms[:-2]
    img = tmp_transforms(img_file)[0]
    img = np.around(img).astype('uint8')
    img = np.expand_dims(img, axis=0)

    if explanation_type == 'lime':
        explaier = get_lime_explaier(
            img, model, dataset, num_samples=num_samples, batch_size=batch_size)
    elif explanation_type == 'normlime':
        if dataset is None:
            raise Exception(
                'The dataset is None: the normlime explanation requires a dataset.')
        explaier = get_normlime_explaier(
            img,
            model,
            dataset,
            num_samples=num_samples,
            batch_size=batch_size,
            save_dir=save_dir)
    else:
        raise Exception('The {} explanation method is not supported yet!'.format(
            explanation_type))
    explaier.explain(img, save_dir=save_dir)


def get_lime_explaier(img, model, dataset, num_samples=3000, batch_size=50):
    def predict_func(image):
        image = image.astype('float32')
        # Convert the sampled images from RGB to BGR channel order before
        # feeding them to the model.
        for i in range(image.shape[0]):
            image[i] = cv2.cvtColor(image[i], cv2.COLOR_RGB2BGR)
        # Temporarily keep only the last two test transforms (the ones removed
        # in `visualize`), run the prediction, then restore the full pipeline.
        tmp_transforms = copy.deepcopy(model.test_transforms.transforms)
        model.test_transforms.transforms = model.test_transforms.transforms[-2:]
        out = model.explanation_predict(image)
        model.test_transforms.transforms = tmp_transforms
        return out[0]

    labels_name = None
    if dataset is not None:
        labels_name = dataset.labels
    explaier = Explanation(
        'lime',
        predict_func,
        labels_name,
        num_samples=num_samples,
        batch_size=batch_size)
    return explaier


def get_normlime_explaier(img, model, dataset, num_samples=3000, batch_size=50,
                          save_dir='./'):
    def precompute_predict_func(image):
        # Prediction function used while precomputing NormLIME weights over
        # the dataset (no channel-order conversion is applied here).
        image = image.astype('float32')
        tmp_transforms = copy.deepcopy(model.test_transforms.transforms)
        model.test_transforms.transforms = model.test_transforms.transforms[-2:]
        out = model.explanation_predict(image)
        model.test_transforms.transforms = tmp_transforms
        return out[0]

    def predict_func(image):
        # Prediction function used by the explainer itself; convert the
        # sampled images from RGB to BGR channel order first.
        image = image.astype('float32')
        for i in range(image.shape[0]):
            image[i] = cv2.cvtColor(image[i], cv2.COLOR_RGB2BGR)
        tmp_transforms = copy.deepcopy(model.test_transforms.transforms)
        model.test_transforms.transforms = model.test_transforms.transforms[-2:]
        out = model.explanation_predict(image)
        model.test_transforms.transforms = tmp_transforms
        return out[0]

    labels_name = None
    if dataset is not None:
        labels_name = dataset.labels
    # Pre-trained helper models are cached under ~/.paddlex/pre_models.
    root_path = osp.join(osp.expanduser('~'), '.paddlex')
    pre_models_path = osp.join(root_path, "pre_models")
    if not osp.exists(pre_models_path):
        os.makedirs(pre_models_path)
        # TODO
        # paddlex.utils.download_and_decompress(url, path=pre_models_path)
    npy_dir = precompute_for_normlime(
        precompute_predict_func,
        dataset,
        num_samples=num_samples,
        batch_size=batch_size,
        save_dir=save_dir)
    explaier = Explanation(
        'normlime',
        predict_func,
        labels_name,
        num_samples=num_samples,
        batch_size=batch_size,
        normlime_weights=npy_dir)
    return explaier


def precompute_for_normlime(predict_func, dataset, num_samples=3000,
                            batch_size=50, save_dir='./'):
    image_list = [item[0] for item in dataset.file_list]
    return precompute_normlime_weights(
        image_list,
        predict_func,
        num_samples=num_samples,
        batch_size=batch_size,
        save_dir=save_dir)
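

# Example usage (a minimal sketch): the import path, the model directory and
# the image path below are assumptions based on a typical PaddleX workflow;
# adapt them to your own project. A dataset is only required when
# explanation_type='normlime'.
#
#     import paddlex as pdx
#     from paddlex.interpret.visualize import visualize  # hypothetical import path
#
#     model = pdx.load_model('output/mobilenetv2/best_model')
#     visualize('images/demo.jpg',
#               model,
#               dataset=None,
#               explanation_type='lime',
#               num_samples=3000,
#               batch_size=50,
#               save_dir='./interpret_results')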