# seg_dataset.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import os.path as osp
  15. import copy
  16. from paddle.io import Dataset
  17. from paddlex.utils import logging, get_num_workers, get_encoding, path_normalization, is_pic
  18. class SegDataset(Dataset):
  19. """读取语义分割任务数据集,并对样本进行相应的处理。
  20. Args:
  21. data_dir (str): 数据集所在的目录路径。
  22. file_list (str): 描述数据集图片文件和对应标注文件的文件路径(文本内每行路径为相对data_dir的相对路)。
  23. label_list (str): 描述数据集包含的类别信息文件路径。默认值为None。
  24. transforms (paddlex.transforms): 数据集中每个样本的预处理/增强算子。
  25. num_workers (int|str): 数据集中样本在预处理过程中的线程或进程数。默认为'auto'。
  26. shuffle (bool): 是否需要对数据集中样本打乱顺序。默认为False。
  27. """
  28. def __init__(self,
  29. data_dir,
  30. file_list,
  31. label_list=None,
  32. transforms=None,
  33. num_workers='auto',
  34. shuffle=False):
  35. super(SegDataset, self).__init__()
  36. self.transforms = copy.deepcopy(transforms)
  37. # TODO batch padding
  38. self.batch_transforms = None
  39. self.num_workers = get_num_workers(num_workers)
  40. self.shuffle = shuffle
  41. self.file_list = list()
  42. self.labels = list()
  43. # TODO:非None时,让用户跳转数据集分析生成label_list
  44. # 不要在此处分析label file
  45. if label_list is not None:
  46. with open(label_list, encoding=get_encoding(label_list)) as f:
  47. for line in f:
  48. item = line.strip()
  49. self.labels.append(item)
  50. with open(file_list, encoding=get_encoding(file_list)) as f:
  51. for line in f:
  52. items = line.strip().split()
  53. if len(items) > 2:
  54. raise Exception(
  55. "A space is defined as the delimiter to separate the image and label path, " \
  56. "so the space cannot be in the image or label path, but the line[{}] of " \
  57. " file_list[{}] has a space in the image or label path.".format(line, file_list))
  58. items[0] = path_normalization(items[0])
  59. items[1] = path_normalization(items[1])
  60. if not is_pic(items[0]) or not is_pic(items[1]):
  61. continue
  62. full_path_im = osp.join(data_dir, items[0])
  63. full_path_label = osp.join(data_dir, items[1])
  64. if not osp.exists(full_path_im):
  65. raise IOError('Image file {} does not exist!'.format(
  66. full_path_im))
  67. if not osp.exists(full_path_label):
  68. raise IOError('Label file {} does not exist!'.format(
  69. full_path_label))
  70. self.file_list.append({
  71. 'image': full_path_im,
  72. 'mask': full_path_label
  73. })
  74. self.num_samples = len(self.file_list)
  75. logging.info("{} samples in file {}".format(
  76. len(self.file_list), file_list))
  77. def __getitem__(self, idx):
  78. sample = copy.deepcopy(self.file_list[idx])
  79. outputs = self.transforms(sample)
  80. return outputs
  81. def __len__(self):
  82. return len(self.file_list)