# unet.py
  1. # 环境变量配置,用于控制是否使用GPU
  2. # 说明文档:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
  3. import os
  4. os.environ['CUDA_VISIBLE_DEVICES'] = '1'
  5. import paddlex as pdx
  6. from paddlex.seg import transforms
  7. # 定义训练和验证时的transforms
  8. # API说明 https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/seg_transforms.html
  9. train_transforms = transforms.Compose([
  10. transforms.RandomPaddingCrop(
  11. crop_size=256,
  12. im_padding_value=[127.5] * 6), transforms.RandomHorizontalFlip(),
  13. transforms.RandomVerticalFlip(), transforms.Normalize(
  14. mean=[0.5] * 6, std=[0.5] * 6, min_val=[0] * 6, max_val=[255] * 6)
  15. ])
  16. eval_transforms = transforms.Compose([
  17. transforms.Padding(
  18. target_size=256, im_padding_value=[127.5] * 6), transforms.Normalize(
  19. mean=[0.5] * 6, std=[0.5] * 6, min_val=[0] * 6, max_val=[255] * 6)
  20. ])
  21. # 定义训练和验证所用的数据集
  22. # API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-segdataset
  23. train_dataset = pdx.datasets.ChangeDetDataset(
  24. data_dir='dataset',
  25. file_list='dataset/train_list.txt',
  26. label_list='dataset/labels.txt',
  27. transforms=train_transforms,
  28. shuffle=True)
  29. eval_dataset = pdx.datasets.ChangeDetDataset(
  30. data_dir='dataset',
  31. file_list='dataset/val_list.txt',
  32. label_list='dataset/labels.txt',
  33. transforms=eval_transforms)
  34. # 初始化模型,并进行训练
  35. # 可使用VisualDL查看训练指标,参考https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
  36. num_classes = len(train_dataset.labels)
  37. # API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#paddlex-seg-deeplabv3p
  38. model = pdx.seg.UNet(num_classes=num_classes, input_channel=6)
  39. # API说明:https://paddlex.readthedocs.io/zh_CN/develop/apis/models/semantic_segmentation.html#train
  40. # 各参数介绍与调整说明:https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
  41. model.train(
  42. num_epochs=400,
  43. train_dataset=train_dataset,
  44. train_batch_size=16,
  45. eval_dataset=eval_dataset,
  46. learning_rate=0.01,
  47. save_interval_epochs=10,
  48. pretrain_weights='CITYSCAPES',
  49. save_dir='output/unet',
  50. use_vdl=True)