# train.py — PaddleX 2.0 YOLOv3 (DarkNet53) training script for a VOC-format dataset.
  1. import paddlex as pdx
  2. from paddlex import transforms as T
  3. # 定义训练和验证时的transforms
  4. # API说明:https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/transforms/operators.py
  5. train_transforms = T.Compose([
  6. T.MixupImage(mixup_epoch=250), T.RandomDistort(),
  7. T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
  8. T.RandomHorizontalFlip(), T.BatchRandomResize(
  9. target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
  10. interp='RANDOM'), T.Normalize(
  11. mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  12. ])
  13. eval_transforms = T.Compose([
  14. T.Resize(
  15. 608, interp='CUBIC'), T.Normalize(
  16. mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  17. ])
  18. # 定义训练和验证所用的数据集
  19. # API说明:https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/datasets/voc.py#L29
  20. train_dataset = pdx.datasets.VOCDetection(
  21. data_dir='dataset',
  22. file_list='dataset/train_list.txt',
  23. label_list='dataset/labels.txt',
  24. transforms=train_transforms,
  25. shuffle=True,
  26. num_worker=0)
  27. eval_dataset = pdx.datasets.VOCDetection(
  28. data_dir='dataset',
  29. file_list='dataset/val_list.txt',
  30. label_list='dataset/labels.txt',
  31. transforms=eval_transforms,
  32. shuffle=False,
  33. num_worker=0)
  34. # 初始化模型,并进行训练
  35. # 可使用VisualDL查看训练指标,参考https://github.com/PaddlePaddle/PaddleX/tree/release/2.0-rc/tutorials/train#visualdl可视化训练指标
  36. num_classes = len(train_dataset.labels)
  37. model = pdx.models.YOLOv3(num_classes=num_classes, backbone='DarkNet53')
  38. # API说明:https://github.com/PaddlePaddle/PaddleX/blob/release/2.0-rc/paddlex/cv/models/detector.py#L155
  39. # 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
  40. model.train(
  41. num_epochs=270,
  42. train_dataset=train_dataset,
  43. train_batch_size=2,
  44. eval_dataset=eval_dataset,
  45. learning_rate=0.001 / 8,
  46. warmup_steps=1000,
  47. warmup_start_lr=0.0,
  48. save_interval_epochs=5,
  49. lr_decay_epochs=[216, 243],
  50. save_dir='output/yolov3_darknet53')