# runner.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import tempfile

from ...base import BaseRunner
from ...base.utils.subprocess import CompletedProcess
  18. class ClsRunner(BaseRunner):
  19. """Cls Runner"""
  20. _INFER_CONFIG_REL_PATH = os.path.join("deploy", "configs", "inference_cls.yaml")
  21. def train(
  22. self,
  23. config_path: str,
  24. cli_args: list,
  25. device: str,
  26. ips: str,
  27. save_dir: str,
  28. do_eval=True,
  29. ) -> CompletedProcess:
  30. """train model
  31. Args:
  32. config_path (str): the config file path used to train.
  33. cli_args (list): the additional parameters.
  34. device (str): the training device.
  35. ips (str): the ip addresses of nodes when using distribution.
  36. save_dir (str): the directory path to save training output.
  37. do_eval (bool, optional): whether or not to evaluate model during training. Defaults to True.
  38. Returns:
  39. CompletedProcess: the result of training subprocess execution.
  40. """
  41. args, env = self.distributed(device, ips, log_dir=save_dir)
  42. cmd = [*args, "tools/train.py", "-c", config_path, *cli_args]
  43. cmd.extend(["-o", f"Global.eval_during_train={do_eval}"])
  44. return self.run_cmd(
  45. cmd,
  46. env=env,
  47. switch_wdir=True,
  48. echo=True,
  49. silent=False,
  50. capture_output=True,
  51. log_path=self._get_train_log_path(save_dir),
  52. )
  53. def evaluate(
  54. self, config_path: str, cli_args: list, device: str, ips: str
  55. ) -> CompletedProcess:
  56. """run model evaluating
  57. Args:
  58. config_path (str): the config file path used to evaluate.
  59. cli_args (list): the additional parameters.
  60. device (str): the evaluating device.
  61. ips (str): the ip addresses of nodes when using distribution.
  62. Returns:
  63. CompletedProcess: the result of evaluating subprocess execution.
  64. """
  65. args, env = self.distributed(device, ips)
  66. cmd = [*args, "tools/eval.py", "-c", config_path, *cli_args]
  67. cp = self.run_cmd(
  68. cmd, env=env, switch_wdir=True, echo=True, silent=False, capture_output=True
  69. )
  70. if cp.returncode == 0:
  71. metric_dict = _extract_eval_metrics(cp.stdout)
  72. cp.metrics = metric_dict
  73. return cp
  74. def predict(
  75. self, config_path: str, cli_args: list, device: str
  76. ) -> CompletedProcess:
  77. """run predicting using dynamic mode
  78. Args:
  79. config_path (str): the config file path used to predict.
  80. cli_args (list): the additional parameters.
  81. device (str): unused.
  82. Returns:
  83. CompletedProcess: the result of predicting subprocess execution.
  84. """
  85. # `device` unused
  86. cmd = [self.python, "tools/infer.py", "-c", config_path, *cli_args]
  87. return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
  88. def export(
  89. self, config_path: str, cli_args: list, device: str, save_dir: str = None
  90. ) -> CompletedProcess:
  91. """run exporting
  92. Args:
  93. config_path (str): the path of config file used to export.
  94. cli_args (list): the additional parameters.
  95. device (str): unused.
  96. save_dir (str, optional): the directory path to save exporting output. Defaults to None.
  97. Returns:
  98. CompletedProcess: the result of exporting subprocess execution.
  99. """
  100. # `device` unused
  101. cmd = [
  102. self.python,
  103. "tools/export_model.py",
  104. "-c",
  105. config_path,
  106. *cli_args,
  107. "-o",
  108. "Global.export_for_fd=True",
  109. "-o",
  110. f"Global.infer_config_path={os.path.join(self.runner_root_path, self._INFER_CONFIG_REL_PATH)}",
  111. ]
  112. cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
  113. return cp
  114. def infer(self, config_path: str, cli_args: list, device: str) -> CompletedProcess:
  115. """run predicting using inference model
  116. Args:
  117. config_path (str): the path of config file used to predict.
  118. cli_args (list): the additional parameters.
  119. device (str): unused.
  120. Returns:
  121. CompletedProcess: the result of infering subprocess execution.
  122. """
  123. # `device` unused
  124. cmd = [self.python, "python/predict_cls.py", "-c", config_path, *cli_args]
  125. return self.run_cmd(cmd, switch_wdir="deploy", echo=True, silent=False)
  126. def compression(
  127. self,
  128. config_path: str,
  129. train_cli_args: list,
  130. export_cli_args: list,
  131. device: str,
  132. train_save_dir: str,
  133. ) -> CompletedProcess:
  134. """run compression model
  135. Args:
  136. config_path (str): the path of config file used to predict.
  137. train_cli_args (list): the additional training parameters.
  138. export_cli_args (list): the additional exporting parameters.
  139. device (str): the running device.
  140. train_save_dir (str): the directory path to save output.
  141. Returns:
  142. CompletedProcess: the result of compression subprocess execution.
  143. """
  144. # Step 1: Train model
  145. cp_train = self.train(config_path, train_cli_args, device, None, train_save_dir)
  146. # Step 2: Export model
  147. weight_path = os.path.join(train_save_dir, "best_model", "model")
  148. export_cli_args = [
  149. *export_cli_args,
  150. "-o",
  151. f"Global.pretrained_model={weight_path}",
  152. ]
  153. cp_export = self.export(config_path, export_cli_args, device)
  154. return cp_train, cp_export
  155. def _extract_eval_metrics(stdout: str) -> dict:
  156. """extract evaluation metrics from training log
  157. Args:
  158. stdout (str): the training log
  159. Returns:
  160. dict: the training metric
  161. """
  162. import re
  163. _DP = r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?"
  164. patterns = [
  165. r"\[Eval\]\[Epoch 0\]\[Avg\].*top1: (_dp), top5: (_dp)".replace("_dp", _DP),
  166. r"\[Eval\]\[Epoch 0\]\[Avg\].*recall1: (_dp), recall5: (_dp), mAP: (_dp)".replace(
  167. "_dp", _DP
  168. ),
  169. ]
  170. keys = [["val.top1", "val.top5"], ["recall1", "recall5", "mAP"]]
  171. metric_dict = dict()
  172. for pattern, key in zip(patterns, keys):
  173. pattern = re.compile(pattern)
  174. for line in stdout.splitlines():
  175. match = pattern.search(line)
  176. if match:
  177. for k, v in zip(key, map(float, match.groups())):
  178. metric_dict[k] = v
  179. return metric_dict