guang_2.py 3.2 KB

# Environment variable configuration, used to control whether GPUs are used
# Documentation: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'
import json
import numpy as np  # needed by MyEncoder below
from paddlex.det import transforms
import paddlex as pdx
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.Normalize(),
    transforms.ResizeByShort(short_size=800, max_size=1333),
    transforms.Padding(coarsest_stride=32)
])
eval_transforms = transforms.Compose([
    transforms.Normalize(),
    transforms.ResizeByShort(short_size=800, max_size=1333),
    transforms.Padding(coarsest_stride=32),
])
# Define the datasets used for training and validation
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
#train_dataset = pdx.datasets.VOCDetection(
#    data_dir='dataset',
#    file_list='dataset/train_list.txt',
#    label_list='dataset/labels.txt',
#    transforms=train_transforms,
#    num_workers=2,
#    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='dataset',
    file_list='dataset/val_list.txt',
    label_list='dataset/labels.txt',
    num_workers=2,
    transforms=eval_transforms)
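# Optional sanity check: confirm what the validation dataset actually loaded.
# This assumes the PaddleX VOCDetection object exposes the `labels` and
# `file_list` attributes described in its documentation.
#print('classes:', eval_dataset.labels)
#print('validation samples:', len(eval_dataset.file_list))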
# Initialize the model and train it
# Training metrics can be visualized with VisualDL, see https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
# num_classes must include the background class, i.e. number of target classes + 1
#num_classes = len(train_dataset.labels) + 1
#
## API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-fasterrcnn
#model = pdx.det.FasterRCNN(num_classes=num_classes, backbone='ResNet50_vd')
#
## API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#id1
## Parameter descriptions and tuning guide: https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
#model.train(
#    num_epochs=36,
#    train_dataset=train_dataset,
#    train_batch_size=8,
#    eval_dataset=eval_dataset,
#    learning_rate=0.01,
#    lr_decay_epochs=[24, 33],
#    warmup_steps=1000,
#    pretrain_weights='ResNet50_vd_ssld_pretrained',
#    save_dir='output/guan_2',
#    use_vdl=False)
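# Optional: to monitor the training curves with VisualDL, pass use_vdl=True to
# model.train() above and point the VisualDL server at the log directory
# written under save_dir (the exact sub-directory name depends on the PaddleX
# version; vdl_log is assumed here), e.g.:
#   visualdl --logdir output/guan_2/vdl_log --port 8001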
# Evaluate an already trained model on a COCO-format validation set
#eval_dataset = pdx.datasets.CocoDetection(
#    data_dir='dataset_coco/JPEGImages',
#    ann_file='dataset_coco/val.json',
#    num_workers=2,
#    transforms=eval_transforms)
#model = pdx.load_model('output/guan_4/best_model/')
#eval_details = model.evaluate(eval_dataset, batch_size=8, return_details=True)
class MyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalar/array types to plain Python types."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)
# Split the saved evaluation details of the best model into separate
# ground-truth and predicted-bbox JSON files for easier inspection
with open('output/guan_4/best_model/eval_details.json', 'r') as f:
    eval_details = json.load(f)

with open('output/guan_4/best_model/gt.json', 'w') as f:
    json.dump(eval_details['gt'], f, indent=4, cls=MyEncoder)
with open('output/guan_4/best_model/bbox.json', 'w') as f:
    json.dump(eval_details['bbox'], f, indent=4, cls=MyEncoder)
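# Possible follow-up (sketch): if the exported gt.json / bbox.json follow the
# COCO annotation and result formats (assumed here), the bbox mAP can be
# reproduced independently with pycocotools:
#from pycocotools.coco import COCO
#from pycocotools.cocoeval import COCOeval
#
#coco_gt = COCO('output/guan_4/best_model/gt.json')
#coco_dt = coco_gt.loadRes('output/guan_4/best_model/bbox.json')
#coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
#coco_eval.evaluate()
#coco_eval.accumulate()
#coco_eval.summarize()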