-
Notifications
You must be signed in to change notification settings - Fork 0
/
dataset.py
78 lines (68 loc) · 3.09 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import os
from PIL import Image
import torch
from torch.utils import data
from torchvision import transforms
class ImageData(data.Dataset):
    """Image/label pair dataset for saliency-style training.

    Pairs each image under ``img_root`` with a same-named ``.png`` mask
    under ``label_root``.

    Args:
        img_root: directory containing the input images (``.jpg``).
        label_root: directory containing the label masks (``.png``).
        transform: preprocessing applied to each image, or None.
        t_transform: preprocessing applied to each label, or None.
        filename: optional split file listing one sample per line
            (e.g. ``xxx.jpg``), used to select train/val/test subsets of
            the combined data. When None, every file in ``img_root`` is used.
    """
    def __init__(self, img_root, label_root, transform, t_transform, filename=None):
        # BUG FIX: the original used string concatenation here
        # ('filename is ' + filename), which raised TypeError whenever
        # filename was None — a value the next line explicitly supports.
        print(f'filename is {filename}')
        if filename is None:
            # Use every image in img_root; derive each label path from the
            # image basename with its extension swapped for '.png'.
            # (os.path.basename/splitext instead of split('/')[-1][:-3]:
            # works on Windows separators and any extension length.)
            self.image_path = [os.path.join(img_root, name) for name in os.listdir(img_root)]
            self.label_path = [
                os.path.join(label_root, os.path.splitext(os.path.basename(p))[0] + '.png')
                for p in self.image_path
            ]
        else:
            # Each line is e.g. 'xxx.jpg'; dropping the last 3 chars leaves
            # 'xxx.' so appending 'jpg'/'png' rebuilds the full name.
            # BUG FIX: the original leaked the open file handle.
            with open(filename) as f:
                stems = [line.rstrip('\n')[:-3] for line in f]
            self.image_path = [os.path.join(img_root, stem + 'jpg') for stem in stems]
            self.label_path = [os.path.join(label_root, stem + 'png') for stem in stems]
        self.transform = transform
        self.t_transform = t_transform

    def __getitem__(self, item):
        """Load and return the ``(image, label)`` pair at index ``item``.

        The label is converted to single-channel grayscale ('L'); each
        optional transform is applied when provided.
        """
        image = Image.open(self.image_path[item])
        label = Image.open(self.label_path[item]).convert('L')
        if self.transform is not None:
            image = self.transform(image)
        if self.t_transform is not None:
            label = self.t_transform(label)
        return image, label

    def __len__(self):
        """Return the number of image/label pairs."""
        return len(self.image_path)
# Build the dataloader (Note: without data augmentation)
def get_loader(img_root, label_root, img_size, batch_size, filename=None, mode='train', num_thread=4, pin=True):
    """Build the training DataLoader, or the raw dataset for evaluation.

    In 'train' mode images are resized, tensorized and ImageNet-normalized,
    labels are resized, tensorized and rounded, and a shuffling DataLoader
    is returned. In any other mode the ImageData dataset itself is returned
    with no image transform and labels kept at their native size.
    """
    # Shared label pipeline tail; rounding snaps masks to {0, 1}.
    label_ops = [
        transforms.ToTensor(),
        transforms.Lambda(torch.round),  # TODO: rounding may be unnecessary
    ]
    if mode != 'train':
        # Evaluation path: hand back the dataset directly (no DataLoader).
        t_transform = transforms.Compose(label_ops)
        return ImageData(img_root, label_root, None, t_transform, filename=filename)
    resize = transforms.Resize((img_size, img_size))
    transform = transforms.Compose([
        resize,
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    t_transform = transforms.Compose([resize] + label_ops)
    dataset = ImageData(img_root, label_root, transform, t_transform, filename=filename)
    return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True,
                           num_workers=num_thread, pin_memory=pin)
if __name__ == '__main__':
    import numpy as np

    # Smoke check: build the combined training split in test mode
    # (returns the raw dataset) and print the shape of the first image.
    dataset = get_loader(
        'data/combined/image',
        'data/combined/annotation',
        224,
        1,
        filename='data/combined/combined_train.txt',
        mode='test',
    )
    for image, label in dataset:
        print(np.array(image).shape)
        break