seg_split.py 3.8 KB

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os.path as osp
import random

from .utils import list_files, is_pic, replace_ext, read_seg_ann
import paddlex.utils.logging as logging


def split_seg_dataset(dataset_dir, val_percent, test_percent, save_dir):
    # The dataset is expected to contain images under JPEGImages/ and
    # single-channel label masks under Annotations/.
    if not osp.exists(osp.join(dataset_dir, "JPEGImages")):
        logging.error("'JPEGImages' is not found in {}!".format(dataset_dir))
    if not osp.exists(osp.join(dataset_dir, "Annotations")):
        logging.error("'Annotations' is not found in {}!".format(dataset_dir))

    all_image_files = list_files(osp.join(dataset_dir, "JPEGImages"))

    # Pair every image with its annotation mask (.png or .PNG).
    image_anno_list = list()
    label_list = list()
    for image_file in all_image_files:
        if not is_pic(image_file):
            continue
        anno_name = replace_ext(image_file, "png")
        if osp.exists(osp.join(dataset_dir, "Annotations", anno_name)):
            image_anno_list.append([image_file, anno_name])
        else:
            anno_name = replace_ext(image_file, "PNG")
            if osp.exists(osp.join(dataset_dir, "Annotations", anno_name)):
                image_anno_list.append([image_file, anno_name])

    # If labels.txt is missing, collect the class ids from each image's own mask.
    if not osp.exists(osp.join(dataset_dir, "labels.txt")):
        for image_anno in image_anno_list:
            labels = read_seg_ann(
                osp.join(dataset_dir, "Annotations", image_anno[1]))
            for i in labels:
                if i not in label_list:
                    label_list.append(i)
        # If the largest class id exceeds the number of collected classes,
        # fill in the missing labels.
        if len(label_list) != max(label_list) + 1:
            label_list = [i for i in range(max(label_list) + 1)]
    # Shuffle, then split into train/val/test according to the given percentages.
    random.shuffle(image_anno_list)
    image_num = len(image_anno_list)
    val_num = int(image_num * val_percent)
    test_num = int(image_num * test_percent)
    train_num = image_num - val_num - test_num

    train_image_anno_list = image_anno_list[:train_num]
    val_image_anno_list = image_anno_list[train_num:train_num + val_num]
    test_image_anno_list = image_anno_list[train_num + val_num:]

    # Write "image_path label_path" pairs, one per line, for each split.
    with open(
            osp.join(save_dir, 'train_list.txt'), mode='w',
            encoding='utf-8') as f:
        for x in train_image_anno_list:
            file = osp.join("JPEGImages", x[0])
            label = osp.join("Annotations", x[1])
            f.write('{} {}\n'.format(file, label))
    with open(
            osp.join(save_dir, 'val_list.txt'), mode='w',
            encoding='utf-8') as f:
        for x in val_image_anno_list:
            file = osp.join("JPEGImages", x[0])
            label = osp.join("Annotations", x[1])
            f.write('{} {}\n'.format(file, label))
    if len(test_image_anno_list):
        with open(
                osp.join(save_dir, 'test_list.txt'), mode='w',
                encoding='utf-8') as f:
            for x in test_image_anno_list:
                file = osp.join("JPEGImages", x[0])
                label = osp.join("Annotations", x[1])
                f.write('{} {}\n'.format(file, label))
    if len(label_list):
        with open(
                osp.join(save_dir, 'labels.txt'), mode='w',
                encoding='utf-8') as f:
            for l in sorted(label_list):
                f.write('{}\n'.format(l))

    return train_num, val_num, test_num
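
A minimal usage sketch, assuming the function is imported from wherever this module lives inside the installed paddlex package; the import path and dataset directory below are placeholders, not part of the original file.

# Placeholder import path -- locate seg_split inside your paddlex installation.
from paddlex.tools.dataset_split.seg_split import split_seg_dataset

# Dataset layout expected by the function:
#   MyDataset/JPEGImages/*.jpg    original images
#   MyDataset/Annotations/*.png   single-channel label masks
train_num, val_num, test_num = split_seg_dataset(
    dataset_dir="MyDataset",  # placeholder path
    val_percent=0.2,          # 20% of images go to val_list.txt
    test_percent=0.1,         # 10% of images go to test_list.txt
    save_dir="MyDataset")     # list files and labels.txt are written here
print("train/val/test images: {}/{}/{}".format(train_num, val_num, test_num))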