# File: train_ppyolov2_r50_aug_COCO_addneg.py
  1. # coding:utf-8
  2. import os
  3. # 选择使用0号卡
  4. os.environ['CUDA_VISIBLE_DEVICES'] = '0'
  5. import paddlex as pdx
  6. from paddlex import transforms as T
  7. # 定义训练和验证时的transforms
  8. train_transforms = T.Compose([
  9. T.MixupImage(mixup_epoch=-1), T.RandomDistort(),
  10. T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]), T.RandomCrop(),
  11. T.RandomHorizontalFlip(), T.BatchRandomResize(
  12. target_sizes=[
  13. 320, 352, 384, 416, 448, 480, 512, 544, 576, 608, 640, 672, 704,
  14. 736, 768
  15. ],
  16. interp='RANDOM'), T.Normalize(
  17. mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  18. ])
  19. eval_transforms = T.Compose([
  20. T.Resize(
  21. target_size=640, interp='CUBIC'), T.Normalize(
  22. mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
  23. ])
  24. # 定义训练和验证所用的数据集
  25. # train_list_double.txt训练文件路径多写了一遍,从而增加有目标图片的占比
  26. train_dataset = pdx.datasets.VOCDetection(
  27. data_dir='/home/aistudio/dataset',
  28. file_list='/home/aistudio/dataset/train_list_double.txt',
  29. label_list='/home/aistudio/dataset/labels.txt',
  30. transforms=train_transforms,
  31. num_workers=0,
  32. shuffle=True)
  33. eval_dataset = pdx.datasets.VOCDetection(
  34. data_dir='/home/aistudio/dataset',
  35. file_list='/home/aistudio/dataset/val_list.txt',
  36. label_list='/home/aistudio/dataset/labels.txt',
  37. transforms=eval_transforms,
  38. num_workers=0,
  39. shuffle=False)
  40. # 把背景图片加入训练集中
  41. train_dataset.add_negative_samples(image_dir='/home/aistudio/dataset/train_neg')
  42. # 初始化模型,并进行训练
  43. # API说明: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-yolov3
  44. num_classes = len(train_dataset.labels)
  45. model = pdx.det.PPYOLOv2(num_classes=num_classes, backbone='ResNet50_vd_dcn')
  46. model.train(
  47. num_epochs=270,
  48. train_dataset=train_dataset,
  49. train_batch_size=8,
  50. eval_dataset=eval_dataset,
  51. pretrain_weights='COCO',
  52. learning_rate=0.005 / 12,
  53. warmup_steps=1000,
  54. warmup_start_lr=0.0,
  55. lr_decay_epochs=[105, 135, 150],
  56. save_interval_epochs=5,
  57. save_dir='output/ppyolov2_r50vd_dcn_coco_aug_addneg')