evaluator.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pathlib import Path
from abc import ABC, abstractmethod

from .build_model import build_model
from ...utils.device import get_device
from ...utils.misc import AutoRegisterABCMetaClass
from ...utils.config import AttrDict
from ...utils.logging import *  # provides logging helpers such as warning()


def build_evaluater(config: AttrDict) -> "BaseEvaluator":
    """Build a model evaluator.

    Args:
        config (AttrDict): PaddleX pipeline config, which is loaded from the pipeline yaml file.

    Returns:
        BaseEvaluator: the evaluator, which is a subclass of BaseEvaluator.
    """
    model_name = config.Global.model
    return BaseEvaluator.get(model_name)(config)
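
# Illustrative usage (a sketch only, not executed by this module): assuming a
# pipeline config has already been loaded into an AttrDict named `config`, with
# `config.Global.model` and `config.Evaluate.weight_path` filled in, an
# evaluator could be built and run as:
#
#     evaluator = build_evaluater(config)
#     result = evaluator()   # -> {"metrics": {...}} on success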


class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
    """Base Model Evaluator"""

    __is_base = True

    def __init__(self, config):
        """Initialize the instance.

        Args:
            config (AttrDict): PaddleX pipeline config, which is loaded from the pipeline yaml file.
        """
        self.global_config = config.Global
        self.eval_config = config.Evaluate
        config_path = self.get_config_path(self.eval_config.weight_path)
        if not config_path.exists():
            warning(
                f"The config file (`{config_path}`) related to the weight file (`{self.eval_config.weight_path}`) does not exist; using the default config instead."
            )
            config_path = None
        self.pdx_config, self.pdx_model = build_model(
            self.global_config.model, config_path=config_path)

    def get_config_path(self, weight_path):
        """Get the path of the config file that accompanies the weight file.

        Args:
            weight_path (str): the path to the weight file.

        Returns:
            Path: the path to the config file, i.e. `config.yaml` in the same directory as the weight file.
        """
        config_path = Path(weight_path).parent / "config.yaml"
        return config_path

    def check_return(self, metrics: dict) -> bool:
        """Check the evaluation metrics.

        Args:
            metrics (dict): evaluation output metrics.

        Returns:
            bool: whether the format of the evaluation metrics is legal,
                i.e. a dict whose values are all floats.
        """
        if not isinstance(metrics, dict):
            return False
        for metric in metrics:
            val = metrics[metric]
            if not isinstance(val, float):
                return False
        return True
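
    # Illustrative example (metric names are hypothetical): a legal return value
    # from eval(), as accepted by check_return(), is a flat dict of floats, e.g.
    #     {"top1_acc": 0.95, "top5_acc": 0.99}
    # whereas {"top1_acc": "0.95"} or any non-dict value would be rejected.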

    def __call__(self) -> dict:
        """Execute model evaluation.

        Returns:
            dict: the evaluation metrics.
        """
        metrics = self.eval()
        assert self.check_return(
            metrics
        ), f"The return value ({metrics}) of Evaluator.eval() is illegal!"
        return {"metrics": metrics}

    def dump_config(self, config_file_path=None):
        """Dump the config.

        Args:
            config_file_path (str, optional): the path to save the dumped config.
                Defaults to None, which means saving to `Global.output` as `config.yaml`.
        """
        if config_file_path is None:
            config_file_path = os.path.join(self.global_config.output,
                                            "config.yaml")
        self.pdx_config.dump(config_file_path)

    def eval(self):
        """Update the evaluation config, evaluate the model, and return the evaluation result."""
        self.update_config()
        # self.dump_config()
        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
        assert (
            evaluate_result.returncode == 0
        ), f"Encountered an unexpected error ({evaluate_result.returncode}) in evaluating!"
        return evaluate_result.metrics

    def get_device(self, using_device_number: int=None) -> str:
        """Get the device setting from the config.

        Args:
            using_device_number (int, optional): specify the device number to use.
                Defaults to None, which means following the config setting.

        Returns:
            str: device setting, such as `gpu:0,1`, `npu:0,1`, `cpu`.
        """
        return get_device(
            self.global_config.device, using_device_number=using_device_number)

    @abstractmethod
    def update_config(self):
        """Update the evaluation config."""
        raise NotImplementedError

    @abstractmethod
    def get_eval_kwargs(self):
        """Get the keyword arguments of the model evaluation function."""
        raise NotImplementedError
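

# Hypothetical sketch (illustrative only, kept as a comment so nothing is
# actually registered): a concrete evaluator subclasses BaseEvaluator and
# implements the two hooks above. The exact pdx_config/pdx_model calls depend
# on the suite, so they are only indicated here.
#
#     class _ExampleEvaluator(BaseEvaluator):
#         def update_config(self):
#             # e.g. write dataset paths / batch size into self.pdx_config
#             ...
#
#         def get_eval_kwargs(self):
#             # keyword arguments forwarded to self.pdx_model.evaluate(...);
#             # the names below are assumptions, not the real signature
#             return {"weight_path": self.eval_config.weight_path,
#                     "device": self.get_device()}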