runner.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile

from ...base import BaseRunner
from ...base.utils.arg import gather_opts_args
from ...base.utils.subprocess import CompletedProcess
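
# SegRunner wraps the PaddleSeg command-line entry points (tools/train.py,
# tools/val.py, tools/predict.py, tools/export.py, deploy/python/infer.py and
# the deploy/slim/quant scripts), running each one as a subprocess through
# BaseRunner.run_cmd.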


class SegRunner(BaseRunner):
    """Semantic Segmentation Runner"""

    def train(
        self,
        config_path: str,
        cli_args: list,
        device: str,
        ips: str,
        save_dir: str,
        do_eval=True,
    ) -> CompletedProcess:
        """train model

        Args:
            config_path (str): the config file path used to train.
            cli_args (list): the additional parameters.
            device (str): the training device.
            ips (str): the ip addresses of nodes when using distribution.
            save_dir (str): the directory path to save training output.
            do_eval (bool, optional): whether or not to evaluate model during training. Defaults to True.

        Returns:
            CompletedProcess: the result of training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, "tools/train.py"]
        if do_eval:
            cmd.append("--do_eval")
        cmd.extend(["--config", config_path, *cli_args])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir),
        )

    def evaluate(
        self, config_path: str, cli_args: list, device: str, ips: str
    ) -> CompletedProcess:
        """run model evaluating

        Args:
            config_path (str): the config file path used to evaluate.
            cli_args (list): the additional parameters.
            device (str): the evaluating device.
            ips (str): the ip addresses of nodes when using distribution.

        Returns:
            CompletedProcess: the result of evaluating subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, "tools/val.py", "--config", config_path, *cli_args]
        cp = self.run_cmd(
            cmd, env=env, switch_wdir=True, echo=True, silent=False, capture_output=True
        )
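        # Only parse metrics when evaluation succeeded; `_extract_eval_metrics`
        # scans the captured stdout for the metrics summary line and attaches
        # the parsed values to the returned CompletedProcess.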
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp

    def predict(
        self, config_path: str, cli_args: list, device: str
    ) -> CompletedProcess:
        """run predicting using dynamic mode

        Args:
            config_path (str): the config file path used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of predicting subprocess execution.
        """
        # `device` unused
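        # Unlike train/evaluate, prediction runs as a single process with
        # `self.python` directly, without the distributed launcher prefix.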
        cli_args = self._gather_opts_args(cli_args)
        cmd = [self.python, "tools/predict.py", "--config", config_path, *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def analyse(self, config_path, cli_args, device, ips):
        """run model analysis"""
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, "tools/analyse.py", "--config", config_path, *cli_args]
        cp = self.run_cmd(
            cmd, env=env, switch_wdir=True, echo=True, silent=False, capture_output=True
        )
        return cp

    def export(self, config_path: str, cli_args: list, device: str) -> CompletedProcess:
        """run exporting

        Args:
            config_path (str): the path of the config file used to export.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of exporting subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python,
            "tools/export.py",
            "--for_fd",
            "--config",
            config_path,
            *cli_args,
        ]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp

    def infer(self, config_path: str, cli_args: list, device: str) -> CompletedProcess:
        """run predicting using inference model

        Args:
            config_path (str): the path of the config file used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of inferring subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python,
            "deploy/python/infer.py",
            "--config",
            config_path,
            *cli_args,
        ]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def compression(
        self,
        config_path: str,
        train_cli_args: list,
        export_cli_args: list,
        device: str,
        train_save_dir: str,
    ) -> tuple:
        """run model compression (quantization-aware training followed by export)

        Args:
            config_path (str): the config file path used for compression.
            train_cli_args (list): the additional training parameters.
            export_cli_args (list): the additional exporting parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save output.

        Returns:
            tuple[CompletedProcess, CompletedProcess]: the results of the training
                and exporting subprocess executions.
        """
        # Step 1: Train model
        args, env = self.distributed(device, log_dir=train_save_dir)
        train_cli_args = self._gather_opts_args(train_cli_args)
        # Note that we add `--do_eval` here so we can have `train_save_dir/best_model/model.pdparams` saved
        cmd = [
            *args,
            "deploy/slim/quant/qat_train.py",
            "--do_eval",
            "--config",
            config_path,
            *train_cli_args,
        ]
        cp_train = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(train_save_dir),
        )
        # Step 2: Export model
        export_cli_args = self._gather_opts_args(export_cli_args)
        # We export the best model on the validation dataset
        weight_path = os.path.join(train_save_dir, "best_model", "model.pdparams")
        cmd = [
            self.python,
            "deploy/slim/quant/qat_export.py",
            "--for_fd",
            "--config",
            config_path,
            "--model_path",
            weight_path,
            *export_cli_args,
        ]
        cp_export = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp_train, cp_export

    def _gather_opts_args(self, args):
        # Since `--opts` in PaddleSeg does not use `action='append'`,
        # we collect and arrange all opts args here
        # e.g.: python tools/train.py --config xxx --opts a=1 c=3 --opts b=2
        # => python tools/train.py --config xxx --opts a=1 c=3 b=2
        return gather_opts_args(args, "--opts")


def _extract_eval_metrics(stdout: str) -> dict:
    """extract evaluation metrics from the evaluation log

    Args:
        stdout (str): the log output of the evaluation subprocess.

    Returns:
        dict: the extracted metrics.
    """
    import re

    _DP = r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?"
    pattern = r"Images: \d+ mIoU: (_dp) Acc: (_dp) Kappa: (_dp) Dice: (_dp)".replace(
        "_dp", _DP
    )
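    # The pattern is meant to match the evaluation summary line printed by
    # PaddleSeg, e.g. (illustrative values):
    #   "Images: 500 mIoU: 0.7654 Acc: 0.9123 Kappa: 0.8456 Dice: 0.8601"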
    keys = ["mIoU", "Acc", "Kappa", "Dice"]
    metric_dict = dict()
    pattern = re.compile(pattern)
    # TODO: Use lazy version to make it more efficient
    lines = stdout.splitlines()
    for line in lines:
        match = pattern.search(line)
        if match:
            for k, v in zip(keys, map(float, match.groups())):
                metric_dict[k] = v
    return metric_dict