import paddlex as pdx
from paddlex import transforms as T

# Download and decompress the insect detection dataset
dataset = 'https://bj.bcebos.com/paddlex/datasets/insect_det.tar.gz'
pdx.utils.download_and_decompress(dataset, path='./')

# Define the transforms used for training and evaluation
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/transforms/transforms.md
train_transforms = T.Compose([
    T.MixupImage(mixup_epoch=250),
    T.RandomDistort(),
    T.RandomExpand(im_padding_value=[123.675, 116.28, 103.53]),
    T.RandomCrop(),
    T.RandomHorizontalFlip(),
    T.BatchRandomResize(
        target_sizes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
        interp='RANDOM'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

eval_transforms = T.Compose([
    T.Resize(608, interp='CUBIC'),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Define the training and evaluation datasets
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/datasets.md
train_dataset = pdx.datasets.VOCDetection(
    data_dir='insect_det',
    file_list='insect_det/train_list.txt',
    label_list='insect_det/labels.txt',
    transforms=train_transforms,
    shuffle=True)

eval_dataset = pdx.datasets.VOCDetection(
    data_dir='insect_det',
    file_list='insect_det/val_list.txt',
    label_list='insect_det/labels.txt',
    transforms=eval_transforms,
    shuffle=False)

# Load the trained model
model = pdx.load_model('output/yolov3_darknet53/best_model')

# Step 1/3: Analyze the sensitivity of each layer's parameters under
# different pruning ratios
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/models/detection.md#analyze_sensitivity
model.analyze_sensitivity(
    dataset=eval_dataset,
    batch_size=1,
    save_dir='output/yolov3_darknet53/prune')

# Step 2/3: Prune the model according to the chosen FLOPs reduction ratio;
# pruned_flops=0.2 prunes the model so that its FLOPs drop by 20%
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/models/detection.md#prune
model.prune(pruned_flops=0.2)

# Step 3/3: Retrain the pruned model. pretrain_weights=None keeps the pruned
# weights loaded above instead of reloading pretrained ones.
# API reference: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/models/detection.md
# Parameter descriptions and tuning guide: https://github.com/PaddlePaddle/PaddleX/blob/develop/docs/apis/models/detection.md#train
model.train(
    num_epochs=270,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    pretrain_weights=None,
    learning_rate=0.001 / 8,
    warmup_steps=1000,
    warmup_start_lr=0.0,
    save_interval_epochs=5,
    lr_decay_epochs=[216, 243],
    save_dir='output/yolov3_darknet53/prune')
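
# ---------------------------------------------------------------------------
# Optional follow-up (a minimal sketch, not part of the original tutorial):
# load the best checkpoint produced by the retraining step above, evaluate it
# on the validation set, and run inference on one sample image. This assumes
# PaddleX's usual output layout (a 'best_model' directory under save_dir) and
# that each line of val_list.txt starts with an image path relative to
# data_dir; adjust the paths if your setup differs.
pruned_model = pdx.load_model('output/yolov3_darknet53/prune/best_model')

# Check how much accuracy the pruned model retains relative to the original.
metrics = pruned_model.evaluate(eval_dataset)
print(metrics)

# Take the first image listed in the validation file list as a sample input.
with open('insect_det/val_list.txt') as f:
    sample_image = 'insect_det/' + f.readline().split()[0]

# predict() applies the model's saved eval transforms when none are given and
# returns a list of dicts with bbox, category, and score fields.
result = pruned_model.predict(sample_image)
print(result)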