optimizer.py

# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import paddle
import paddle.nn as nn
import paddle.optimizer as optimizer
import paddle.regularizer as regularizer

from paddlex.ppdet.core.workspace import register, serializable
from paddlex.ppdet.utils.logger import setup_logger

logger = setup_logger(__name__)

__all__ = ['LearningRate', 'OptimizerBuilder']


@serializable
class CosineDecay(object):
    """
    Cosine learning rate decay

    Args:
        max_epochs (int): max epochs for the training process.
            If you combine cosine decay with warmup, it is recommended
            that max_iters be much larger than the number of warmup
            iterations.
    """

    def __init__(self, max_epochs=1000, use_warmup=True, eta_min=0):
        self.max_epochs = max_epochs
        self.use_warmup = use_warmup
        self.eta_min = eta_min

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        assert base_lr is not None, "either base LR or values should be provided"
        max_iters = self.max_epochs * int(step_per_epoch)

        if boundary is not None and value is not None and self.use_warmup:
            warmup_iters = len(boundary)
            for i in range(int(boundary[-1]), max_iters):
                boundary.append(i)
                decayed_lr = base_lr * 0.5 * (math.cos(
                    (i - warmup_iters) * math.pi /
                    (max_iters - warmup_iters)) + 1)
                value.append(decayed_lr)
            return optimizer.lr.PiecewiseDecay(boundary, value)

        return optimizer.lr.CosineAnnealingDecay(
            base_lr, T_max=max_iters, eta_min=self.eta_min)
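

# Illustrative sketch, not part of the original module: driving CosineDecay
# directly. The epoch count, base LR, and steps-per-epoch below are assumptions
# chosen for the demo, not values taken from any config.
def _example_cosine_decay():
    # Without warmup lists, __call__ falls through to CosineAnnealingDecay,
    # annealing from base_lr towards eta_min over max_epochs * step_per_epoch
    # iterations.
    cosine = CosineDecay(max_epochs=300, use_warmup=False)
    scheduler = cosine(base_lr=0.01, step_per_epoch=500)
    return scheduler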


@serializable
class PiecewiseDecay(object):
    """
    Multi-step learning rate decay

    Args:
        gamma (float | list): decay factor(s); a scalar is expanded to one
            factor per milestone (gamma, gamma / 10, ...)
        milestones (list): steps at which to decay learning rate
    """

    def __init__(self,
                 gamma=[0.1, 0.01],
                 milestones=[8, 11],
                 values=None,
                 use_warmup=True):
        super(PiecewiseDecay, self).__init__()
        if type(gamma) is not list:
            self.gamma = []
            for i in range(len(milestones)):
                self.gamma.append(gamma / 10**i)
        else:
            self.gamma = gamma
        self.milestones = milestones
        self.values = values
        self.use_warmup = use_warmup

    def __call__(self,
                 base_lr=None,
                 boundary=None,
                 value=None,
                 step_per_epoch=None):
        if boundary is not None and self.use_warmup:
            boundary.extend([int(step_per_epoch) * i for i in self.milestones])
        else:
            # do not use LinearWarmup
            boundary = [int(step_per_epoch) * i for i in self.milestones]
            value = [base_lr]  # lr during steps [0, boundary[0]) is base_lr

        # self.values is set directly in the config
        if self.values is not None:
            assert len(self.milestones) + 1 == len(self.values)
            return optimizer.lr.PiecewiseDecay(boundary, self.values)

        # otherwise value is computed from self.gamma
        value = value if value is not None else [base_lr]
        for i in self.gamma:
            value.append(base_lr * i)

        return optimizer.lr.PiecewiseDecay(boundary, value)
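

# Illustrative sketch, not part of the original module: PiecewiseDecay used on
# its own, without warmup. Milestones are given in epochs and converted to
# iterations via step_per_epoch; the numbers below are assumed for the demo.
def _example_piecewise_decay():
    piecewise = PiecewiseDecay(
        gamma=[0.1, 0.01], milestones=[8, 11], use_warmup=False)
    # With step_per_epoch=500 this yields boundaries [4000, 5500] and
    # learning rates [0.01, 0.001, 0.0001].
    scheduler = piecewise(base_lr=0.01, step_per_epoch=500)
    return scheduler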


@serializable
class LinearWarmup(object):
    """
    Warm up learning rate linearly

    Args:
        steps (int): warm up steps
        start_factor (float): initial learning rate factor
    """

    def __init__(self, steps=500, start_factor=1. / 3):
        super(LinearWarmup, self).__init__()
        self.steps = steps
        self.start_factor = start_factor

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        for i in range(self.steps + 1):
            if self.steps > 0:
                alpha = i / self.steps
                factor = self.start_factor * (1 - alpha) + alpha
                lr = base_lr * factor
                value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
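

# Illustrative sketch, not part of the original module: LinearWarmup returns
# the raw (boundary, value) lists that a decay scheduler can extend. The step
# count and base LR below are assumptions for the demo.
def _example_linear_warmup():
    warmup = LinearWarmup(steps=4, start_factor=1. / 3)
    boundary, value = warmup(base_lr=0.01, step_per_epoch=500)
    # boundary == [1, 2, 3, 4]; value ramps linearly from base_lr / 3 to base_lr.
    return boundary, value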


@serializable
class BurninWarmup(object):
    """
    Warm up learning rate in burnin mode

    Args:
        steps (int): warm up steps
    """

    def __init__(self, steps=1000):
        super(BurninWarmup, self).__init__()
        self.steps = steps

    def __call__(self, base_lr, step_per_epoch):
        boundary = []
        value = []
        burnin = min(self.steps, step_per_epoch)
        for i in range(burnin + 1):
            factor = (i * 1.0 / burnin)**4
            lr = base_lr * factor
            value.append(lr)
            if i > 0:
                boundary.append(i)
        return boundary, value
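

# Illustrative sketch, not part of the original module: BurninWarmup ramps the
# learning rate with a quartic curve over at most one epoch. Numbers are
# assumptions for the demo.
def _example_burnin_warmup():
    warmup = BurninWarmup(steps=1000)
    # burnin = min(1000, step_per_epoch) = 500, so lr = base_lr * (i / 500) ** 4.
    boundary, value = warmup(base_lr=0.01, step_per_epoch=500)
    return boundary, value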


@register
class LearningRate(object):
    """
    Learning Rate configuration

    Args:
        base_lr (float): base learning rate
        schedulers (list): learning rate schedulers
    """
    __category__ = 'optim'

    def __init__(self,
                 base_lr=0.01,
                 schedulers=[PiecewiseDecay(), LinearWarmup()]):
        super(LearningRate, self).__init__()
        self.base_lr = base_lr
        self.schedulers = schedulers

    def __call__(self, step_per_epoch):
        assert len(self.schedulers) >= 1
        if not self.schedulers[0].use_warmup:
            return self.schedulers[0](base_lr=self.base_lr,
                                      step_per_epoch=step_per_epoch)

        # TODO: split warmup & decay
        # warmup
        boundary, value = self.schedulers[1](self.base_lr, step_per_epoch)
        # decay
        decay_lr = self.schedulers[0](self.base_lr, boundary, value,
                                      step_per_epoch)
        return decay_lr
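

# Illustrative sketch, not part of the original module: LearningRate is the
# piece normally built from the YAML config; it chains the warmup scheduler
# (schedulers[1]) into the decay scheduler (schedulers[0]). The values below
# are assumptions for the demo.
def _example_learning_rate():
    lr_cfg = LearningRate(
        base_lr=0.01,
        schedulers=[
            PiecewiseDecay(gamma=[0.1, 0.01], milestones=[8, 11]),
            LinearWarmup(steps=500)
        ])
    # Returns a paddle.optimizer.lr.PiecewiseDecay covering warmup + decay.
    scheduler = lr_cfg(step_per_epoch=500)
    return scheduler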


@register
class OptimizerBuilder():
    """
    Build optimizer handles

    Args:
        regularizer (object): a `Regularizer` instance
        optimizer (object): an `Optimizer` instance
    """
    __category__ = 'optim'

    def __init__(self,
                 clip_grad_by_norm=None,
                 regularizer={'type': 'L2',
                              'factor': .0001},
                 optimizer={'type': 'Momentum',
                            'momentum': .9}):
        self.clip_grad_by_norm = clip_grad_by_norm
        self.regularizer = regularizer
        self.optimizer = optimizer

    def __call__(self, learning_rate, model=None):
        if self.clip_grad_by_norm is not None:
            grad_clip = nn.ClipGradByGlobalNorm(
                clip_norm=self.clip_grad_by_norm)
        else:
            grad_clip = None

        if self.regularizer and self.regularizer != 'None':
            reg_type = self.regularizer['type'] + 'Decay'
            reg_factor = self.regularizer['factor']
            regularization = getattr(regularizer, reg_type)(reg_factor)
        else:
            regularization = None

        optim_args = self.optimizer.copy()
        optim_type = optim_args['type']
        del optim_args['type']
        if optim_type != 'AdamW':
            optim_args['weight_decay'] = regularization
        op = getattr(optimizer, optim_type)

        if 'without_weight_decay_params' in optim_args:
            keys = optim_args['without_weight_decay_params']
            params = [{
                'params': [
                    p for n, p in model.named_parameters()
                    if any([k in n for k in keys])
                ],
                'weight_decay': 0.
            }, {
                'params': [
                    p for n, p in model.named_parameters()
                    if all([k not in n for k in keys])
                ]
            }]
            del optim_args['without_weight_decay_params']
        else:
            params = model.parameters()

        return op(learning_rate=learning_rate,
                  parameters=params,
                  grad_clip=grad_clip,
                  **optim_args)
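

# Illustrative sketch, not part of the original module: building a Momentum
# optimizer with the default L2 regularizer for a toy model. The stand-in
# model, clip norm, and learning rate are assumptions for the demo.
def _example_optimizer_builder():
    model = nn.Linear(4, 2)  # stand-in for a detector
    builder = OptimizerBuilder(clip_grad_by_norm=35.)
    # Yields paddle.optimizer.Momentum with momentum=0.9, L2Decay(0.0001)
    # weight decay, and global-norm gradient clipping.
    optim = builder(learning_rate=0.01, model=model)
    return optim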


class ModelEMA(object):
    """
    Exponential Weighted Average for Deep Neural Networks

    Args:
        model (nn.Layer): The detector model whose parameters are tracked.
        decay (float): The decay used for updating the EMA parameters.
            EMA parameters are updated with the formula:
            `ema_param = decay * ema_param + (1 - decay) * cur_param`.
            Defaults to 0.9998.
        use_thres_step (bool): Whether to cap the decay with the
            step-dependent threshold `(1 + step) / (10 + step)` or not.
        cycle_epoch (int): The interval, in epochs, at which ema_param and
            step are reset. Defaults to -1, which means never reset. This
            adds a regularizing effect to EMA; it is chosen empirically and
            is effective when the total number of training epochs is large.
    """

    def __init__(self,
                 model,
                 decay=0.9998,
                 use_thres_step=False,
                 cycle_epoch=-1):
        self.step = 0
        self.epoch = 0
        self.decay = decay
        self.state_dict = dict()
        for k, v in model.state_dict().items():
            self.state_dict[k] = paddle.zeros_like(v)
        self.use_thres_step = use_thres_step
        self.cycle_epoch = cycle_epoch

    def reset(self):
        self.step = 0
        self.epoch = 0
        for k, v in self.state_dict.items():
            self.state_dict[k] = paddle.zeros_like(v)

    def update(self, model):
        if self.use_thres_step:
            decay = min(self.decay, (1 + self.step) / (10 + self.step))
        else:
            decay = self.decay
        self._decay = decay
        model_dict = model.state_dict()
        for k, v in self.state_dict.items():
            v = decay * v + (1 - decay) * model_dict[k]
            v.stop_gradient = True
            self.state_dict[k] = v
        self.step += 1

    def apply(self):
        if self.step == 0:
            return self.state_dict
        state_dict = dict()
        for k, v in self.state_dict.items():
            # bias correction for the zero initialization of the EMA state
            v = v / (1 - self._decay**self.step)
            v.stop_gradient = True
            state_dict[k] = v
        self.epoch += 1
        if self.cycle_epoch > 0 and self.epoch == self.cycle_epoch:
            self.reset()
        return state_dict
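

# Illustrative sketch, not part of the original module: where update()/apply()
# fit in a training loop. `model`, `opt`, and `data_loader` are assumed to be a
# detector that returns a dict with a 'loss' key during training, a paddle
# optimizer, and an iterable of batches, respectively.
def _example_model_ema(model, opt, data_loader):
    ema = ModelEMA(model, decay=0.9998, use_thres_step=True)
    for data in data_loader:
        loss = model(data)['loss']  # assumed detector training interface
        loss.backward()
        opt.step()
        opt.clear_grad()
        ema.update(model)  # track weights after every optimizer step
    # Swap in the bias-corrected averaged weights before evaluation or export.
    model.set_state_dict(ema.apply())
    return model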