voc_dataset.py
import os
import xml.etree.ElementTree as ET

import cv2
import numpy as np
import torch
import torch.utils.data as data

from config import opt
from lib.augmentations import preproc_for_test, preproc_for_train

# The 20 Pascal VOC object classes; a class's index in this tuple is its label id.
VOC_LABELS = (
    'aeroplane',
    'bicycle',
    'bird',
    'boat',
    'bottle',
    'bus',
    'car',
    'cat',
    'chair',
    'cow',
    'diningtable',
    'dog',
    'horse',
    'motorbike',
    'person',
    'pottedplant',
    'sheep',
    'sofa',
    'train',
    'tvmonitor',
)


class VOCDetection(data.Dataset):
    """Pascal VOC detection dataset.

    Each item is an (image, target) pair, where target is an (N, 5) array with
    one [xmin, ymin, xmax, ymax, label_id] row per annotated object.
    """

    def __init__(self, opt, image_sets=[['2007', 'trainval'], ['2012', 'trainval']], is_train=True):
        self.root = opt.VOC_ROOT
        self.image_sets = image_sets
        self.is_train = is_train
        self.opt = opt

        # Collect (image path, annotation path) pairs for every id listed in
        # the ImageSets/Main split file of each requested (year, split).
        self.ids = []
        for (year, name) in self.image_sets:
            root_path = os.path.join(self.root, 'VOC' + name + year)
            root_path = os.path.join(root_path, 'VOC' + year)
            ano_file = os.path.join(root_path, 'ImageSets', 'Main', name + '.txt')
            with open(ano_file, 'r') as f:
                for line in f.readlines():
                    line = line.strip()
                    ano_path = os.path.join(root_path, 'Annotations', line + '.xml')
                    img_path = os.path.join(root_path, 'JPEGImages', line + '.jpg')
                    self.ids.append((img_path, ano_path))

    def __getitem__(self, index):
        img_path, ano_path = self.ids[index]
        image = cv2.imread(img_path, cv2.IMREAD_COLOR)
        boxes, labels = self.get_annotations(ano_path)

        if self.is_train:
            # Augment, resize, and normalise for training; evaluation images
            # are currently returned as the raw BGR array read by OpenCV.
            image, boxes, labels = preproc_for_train(image, boxes, labels, self.opt.min_size, self.opt.mean)
        image = torch.from_numpy(image)

        # One [xmin, ymin, xmax, ymax, label_id] row per object.
        target = np.concatenate([boxes, labels.reshape(-1, 1)], axis=1)
        return image, target

    def get_annotations(self, path):
        """Parse one Pascal VOC XML annotation file into boxes and label ids."""
        tree = ET.parse(path)
        boxes = []
        labels = []
        for child in tree.getroot():
            if child.tag != 'object':
                continue
            bndbox = child.find('bndbox')
            # VOC coordinates are 1-based; shift them to 0-based pixels.
            box = [
                float(bndbox.find(t).text) - 1
                for t in ['xmin', 'ymin', 'xmax', 'ymax']
            ]
            label = VOC_LABELS.index(child.find('name').text)
            boxes.append(box)
            labels.append(label)
        return np.array(boxes), np.array(labels)

    def __len__(self):
        return len(self.ids)
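

# --- Illustrative addition, not part of the original file -------------------
# Each image can contain a different number of objects, so the (N, 5) target
# arrays cannot be stacked by PyTorch's default collate. A custom collate
# function such as this hypothetical `detection_collate` is a common way to
# batch VOCDetection with torch.utils.data.DataLoader. Minimal sketch only;
# it assumes preproc_for_train returns images of one fixed size so they stack.
def detection_collate(batch):
    """Stack images into a single tensor; keep per-image targets in a list."""
    images = torch.stack([img for img, _ in batch], dim=0)
    targets = [torch.from_numpy(tgt).float() for _, tgt in batch]
    return images, targets

# Example usage (hypothetical batch size):
#   loader = data.DataLoader(VOCDetection(opt), batch_size=8, shuffle=True,
#                            collate_fn=detection_collate)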


if __name__ == '__main__':
    # Quick sanity check: build the VOC2007 test split and print its size.
    a = VOCDetection(opt, image_sets=[['2007', 'test']], is_train=True)
    # a[0]
    print(len(a))
    # print(a.ids[0][0])
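    # Illustrative sketch, not in the original file: draw the ground-truth
    # boxes of the first sample and save the result for a quick visual check.
    # Construct the dataset with is_train=False first, so `img` is the raw
    # HWC BGR array rather than a preprocessed tensor.
    # img, tgt = a[0]
    # img = img.numpy().copy()
    # for xmin, ymin, xmax, ymax, label in tgt:
    #     cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 255, 0), 2)
    #     cv2.putText(img, VOC_LABELS[int(label)], (int(xmin), int(ymin) - 5),
    #                 cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # cv2.imwrite('voc_sample.jpg', img)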