runner.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile

from ...base import BaseRunner
from ...base.utils.subprocess import CompletedProcess


class TextRecRunner(BaseRunner):
    """ Text Recognition Runner """

    def train(self,
              config_path: str,
              cli_args: list,
              device: str,
              ips: str,
              save_dir: str,
              do_eval=True) -> CompletedProcess:
        """train model

        Args:
            config_path (str): the config file path used to train.
            cli_args (list): the additional parameters.
            device (str): the training device.
            ips (str): the ip addresses of nodes when using distributed training.
            save_dir (str): the directory path to save training output.
            do_eval (bool, optional): whether or not to evaluate the model during training. Defaults to True.

        Returns:
            CompletedProcess: the result of the training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cmd = [*args, 'tools/train.py', '-c', config_path, *cli_args]
        if do_eval:
            # We simply pass here because periodic evaluation cannot be
            # switched off in PaddleOCR.
            pass
        else:
            # Effectively disable periodic evaluation by pushing the eval step
            # far beyond any realistic iteration count.
            inf = int(1.e11)
            cmd.extend(['-o', f"Global.eval_batch_step={inf}"])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir))
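
    # Minimal usage sketch (illustrative only; how the runner instance is
    # constructed is outside this module, and the config path / overrides
    # below are example values, not part of this API):
    #
    #   runner = TextRecRunner(...)  # hypothetical instantiation
    #   cp = runner.train(
    #       config_path='configs/rec/PP-OCRv4/ch_PP-OCRv4_rec.yml',
    #       cli_args=['-o', 'Global.epoch_num=10'],
    #       device='gpu:0,1',
    #       ips=None,
    #       save_dir='output/rec_train',
    #       do_eval=True)
    #   # cp wraps the launched `tools/train.py` run; cp.returncode == 0 on success.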

    def evaluate(self, config_path: str, cli_args: list, device: str,
                 ips: str) -> CompletedProcess:
        """run model evaluating

        Args:
            config_path (str): the config file path used to evaluate.
            cli_args (list): the additional parameters.
            device (str): the evaluating device.
            ips (str): the ip addresses of nodes when using distributed evaluation.

        Returns:
            CompletedProcess: the result of the evaluating subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cmd = [*args, 'tools/eval.py', '-c', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp
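
    # On success, `cp.metrics` holds the metrics parsed from the PaddleOCR
    # evaluation log, e.g. something like {'acc': 0.81, 'norm_edit_dis': 0.93}
    # (values illustrative; the available keys depend on which metrics the
    # model reports; see `_extract_eval_metrics` below).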

    def predict(self, config_path: str, cli_args: list,
                device: str) -> CompletedProcess:
        """run predicting using dynamic mode

        Args:
            config_path (str): the config file path used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the predicting subprocess execution.
        """
        # `device` unused
        cmd = [self.python, 'tools/infer_rec.py', '-c', config_path, *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def export(self,
               config_path: str,
               cli_args: list,
               device: str,
               save_dir: str=None) -> CompletedProcess:
        """run exporting

        Args:
            config_path (str): the path of config file used to export.
            cli_args (list): the additional parameters.
            device (str): unused.
            save_dir (str, optional): the directory path to save exporting output. Defaults to None.

        Returns:
            CompletedProcess: the result of the exporting subprocess execution.
        """
        # `device` unused
        cmd = [self.python, 'tools/export_model.py', '-c', config_path, *cli_args]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp
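
    # Callers typically select the output location through `cli_args`, e.g. an
    # override such as ['-o', 'Global.save_inference_dir=output/rec_infer']
    # (path illustrative); `save_dir` itself is not consumed here.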

    def infer(self, config_path: str, cli_args: list,
              device: str) -> CompletedProcess:
        """run predicting using the inference model

        Args:
            config_path (str): the path of config file used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the inferring subprocess execution.
        """
        # `config_path` and `device` unused; `tools/infer/predict_rec.py` is
        # configured entirely through command-line flags.
        cmd = [self.python, 'tools/infer/predict_rec.py', *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
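
    # `tools/infer/predict_rec.py` expects its inputs as flags, e.g.
    # ['--rec_model_dir=output/rec_infer', '--image_dir=docs/imgs_words'],
    # so everything is forwarded via `cli_args` (paths illustrative).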

    def compression(self,
                    config_path: str,
                    train_cli_args: list,
                    export_cli_args: list,
                    device: str,
                    train_save_dir: str) -> tuple:
        """run model compression

        Args:
            config_path (str): the path of config file used to compress.
            train_cli_args (list): the additional training parameters.
            export_cli_args (list): the additional exporting parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save output.

        Returns:
            tuple: the results of the training and exporting subprocess executions.
        """
        # Step 1: Train the quantized model
        args, env = self.distributed(device, log_dir=train_save_dir)
        cmd = [
            *args, 'deploy/slim/quantization/quant.py', '-c', config_path,
            *train_cli_args
        ]
        cp_train = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(train_save_dir))

        # Step 2: Export the trained model from the latest checkpoint
        export_cli_args = [
            *export_cli_args, '-o',
            f"Global.checkpoints={train_save_dir}/latest"
        ]
        cmd = [
            self.python, 'deploy/slim/quantization/export_model.py', '-c',
            config_path, *export_cli_args
        ]
        cp_export = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp_train, cp_export
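
    # Illustrative handling of the two results (assuming a `runner` instance):
    #
    #   cp_train, cp_export = runner.compression(...)
    #   if cp_train.returncode == 0 and cp_export.returncode == 0:
    #       # both quantization-aware training and export succeeded
    #       ...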


def _extract_eval_metrics(stdout: str) -> dict:
    """extract evaluation metrics from training log

    Args:
        stdout (str): the training log

    Returns:
        dict: the training metric
    """
    import re

    def _lazy_split_lines(s):
        prev_idx = 0
        while True:
            curr_idx = s.find(os.linesep, prev_idx)
            if curr_idx == -1:
                curr_idx = len(s)
            yield s[prev_idx:curr_idx]
            prev_idx = curr_idx + len(os.linesep)
            if prev_idx >= len(s):
                break

    _DP = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
    pattern_key_pairs = [
        (re.compile(r'acc:(_dp)$'.replace('_dp', _DP)), 'acc'),
        (re.compile(r'norm_edit_dis:(_dp)$'.replace('_dp', _DP)),
         'norm_edit_dis'),
        (re.compile(r'Teacher_acc:(_dp)$'.replace('_dp', _DP)), 'teacher_acc'),
        (re.compile(r'Teacher_norm_edit_dis:(_dp)$'.replace('_dp', _DP)),
         'teacher_norm_edit_dis'),
        (re.compile(r'precision:(_dp)$'.replace('_dp', _DP)), 'precision'),
        (re.compile(r'recall:(_dp)$'.replace('_dp', _DP)), 'recall'),
        (re.compile(r'hmean:(_dp)$'.replace('_dp', _DP)), 'hmean'),
    ]
    metric_dict = dict()
    start_match = False
    for line in _lazy_split_lines(stdout):
        if 'metric eval' in line:
            start_match = True
        if start_match:
            for pattern, key in pattern_key_pairs:
                match = pattern.search(line)
                if match:
                    assert len(match.groups()) == 1
                    # Newer overwrites older
                    metric_dict[key] = float(match.group(1))
    return metric_dict
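

# Illustrative behaviour of `_extract_eval_metrics` (the log lines below are a
# made-up sample in PaddleOCR's usual format, and the splitting assumes a POSIX
# os.linesep of '\n'):
#
#   >>> _extract_eval_metrics(
#   ...     "[2024/01/01 00:00:00] ppocr INFO: metric eval ***************\n"
#   ...     "[2024/01/01 00:00:00] ppocr INFO: acc:0.812\n"
#   ...     "[2024/01/01 00:00:00] ppocr INFO: norm_edit_dis:0.933")
#   {'acc': 0.812, 'norm_edit_dis': 0.933}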