# train_pruned_yolov3.py
  1. # 环境变量配置,用于控制是否使用GPU
  2. # 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
  3. import os
  4. os.environ['CUDA_VISIBLE_DEVICES'] = '0'
  5. from paddlex.det import transforms
  6. import paddlex as pdx
  7. # 定义训练和验证时的transforms
  8. # API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
  9. train_transforms = transforms.Compose([
  10. transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(),
  11. transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize(
  12. target_size=608, interp='RANDOM'), transforms.RandomHorizontalFlip(),
  13. transforms.Normalize()
  14. ])
  15. eval_transforms = transforms.Compose([
  16. transforms.Resize(
  17. target_size=608, interp='CUBIC'), transforms.Normalize()
  18. ])
  19. # 定义训练和验证所用的数据集
  20. # API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
  21. train_dataset = pdx.datasets.VOCDetection(
  22. data_dir='aluminum_inspection',
  23. file_list='aluminum_inspection/train_list.txt',
  24. label_list='aluminum_inspection/labels.txt',
  25. transforms=train_transforms,
  26. shuffle=True)
  27. eval_dataset = pdx.datasets.VOCDetection(
  28. data_dir='aluminum_inspection',
  29. file_list='aluminum_inspection/val_list.txt',
  30. label_list='aluminum_inspection/labels.txt',
  31. transforms=eval_transforms)
  32. # 初始化模型,并进行训练
  33. # 可使用VisualDL查看训练指标,参考https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
  34. num_classes = len(train_dataset.labels)
  35. # API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
  36. model = pdx.det.YOLOv3(num_classes=num_classes, backbone='MobileNetV3_large')
  37. # API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train
  38. # 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
  39. model.train(
  40. num_epochs=400,
  41. train_dataset=train_dataset,
  42. train_batch_size=8,
  43. eval_dataset=eval_dataset,
  44. warmup_steps=4000,
  45. learning_rate=0.000125,
  46. lr_decay_epochs=[240, 320],
  47. pretrain_weights='output/yolov3_mobilenetv3/best_model',
  48. save_dir='output/yolov3_mobilenetv3_prune',
  49. use_vdl=True,
  50. sensitivities_file='./sensitivities.data',
  51. eval_metric_loss=0.05)