preprocess.py
import os
from glob import glob
from scipy.io import loadmat
import cv2
from argparse import ArgumentParser
from tqdm import tqdm
import numpy as np
from typing import Tuple, Union, Optional
from warnings import warn
from datasets import standardize_dataset_name
def _calc_size(
img_w: int,
img_h: int,
min_size: int,
max_size: int,
base: int = 32
) -> Union[Tuple[int, int], None]:
"""
This function generates a new size for an image while keeping the aspect ratio. The new size should be within the given range (min_size, max_size).
Args:
img_w (int): The width of the image.
img_h (int): The height of the image.
min_size (int): The minimum size of the edges of the image.
max_size (int): The maximum size of the edges of the image.
"""
assert min_size % base == 0, f"min_size ({min_size}) must be a multiple of {base}"
if max_size != float("inf"):
assert max_size % base == 0, f"max_size ({max_size}) must be a multiple of {base} if provided"
assert min_size <= max_size, f"min_size ({min_size}) must be less than or equal to max_size ({max_size})"
aspect_ratios = (img_w / img_h, img_h / img_w)
if min_size / max_size <= min(aspect_ratios) <= max(aspect_ratios) <= max_size / min_size: # possible to resize and preserve the aspect ratio
if min_size <= min(img_w, img_h) <= max(img_w, img_h) <= max_size: # already within the range, no need to resize
ratio = 1.
elif min(img_w, img_h) < min_size: # smaller than the minimum size, resize to the minimum size
ratio = min_size / min(img_w, img_h)
else: # larger than the maximum size, resize to the maximum size
ratio = max_size / max(img_w, img_h)
new_w, new_h = int(round(img_w * ratio / base) * base), int(round(img_h * ratio / base) * base)
new_w = max(min_size, min(max_size, new_w))
new_h = max(min_size, min(max_size, new_h))
return new_w, new_h
else: # impossible to resize and preserve the aspect ratio
msg = f"Impossible to resize {img_w}x{img_h} image while preserving the aspect ratio to a size within the range ({min_size}, {max_size}). Will not limit the maximum size."
warn(msg)
return _calc_size(img_w, img_h, min_size, float("inf"), base)
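# Example behaviour of _calc_size, worked from the logic above (base = 32):
#   _calc_size(1024, 768, min_size=256, max_size=2048) -> (1024, 768)  # already within range, kept as is
#   _calc_size(200, 150, min_size=256, max_size=2048)  -> (352, 256)   # upscaled so the shorter edge reaches min_size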
def _generate_random_indices(
total_size: int,
out_dir: str,
) -> None:
"""
Generate randomly selected indices for labelled data in semi-supervised learning.
"""
rng = np.random.default_rng(42)
for percent in [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
num_select = int(total_size * percent)
selected = rng.choice(total_size, num_select, replace=False)
selected.sort()
selected = selected.tolist()
with open(os.path.join(out_dir, f"{int(percent * 100)}%.txt"), "w") as f:
for i in selected:
f.write(f"{i}\n")
def _resize(image: np.ndarray, label: np.ndarray, min_size: int, max_size: int) -> Tuple[np.ndarray, np.ndarray, bool]:
image_h, image_w, _ = image.shape
new_size = _calc_size(image_w, image_h, min_size, max_size)
if new_size is None:
return image, label, False
else:
new_w, new_h = new_size
image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC) if (new_w, new_h) != (image_w, image_h) else image
label = label * np.array([[new_w / image_w, new_h / image_h]]) if len(label) > 0 and (new_w, new_h) != (image_w, image_h) else label
return image, label, True
def _preprocess(
dataset: str,
data_src_dir: str,
data_dst_dir: str,
min_size: int,
max_size: int,
generate_npy: bool = False
) -> None:
"""
This function organizes the data into the following structure (when generate_npy
is enabled, each training image additionally gets a .npy copy next to its .jpg
inside the images directory):
data_dst_dir
├── train
│   ├── images
│   │   ├── 0001.jpg
│   │   ├── 0001.npy
│   │   ├── 0002.jpg
│   │   ├── 0002.npy
│   │   ├── ...
│   ├── labels
│   │   ├── 0001.npy
│   │   ├── 0002.npy
│   │   ├── ...
│   ├── 1%.txt
│   ├── 5%.txt
│   ├── ...
├── val
│   ├── images
│   │   ├── 0001.jpg
│   │   ├── 0002.jpg
│   │   ├── ...
│   ├── labels
│   │   ├── 0001.npy
│   │   ├── 0002.npy
│   │   ├── ...
"""
dataset = standardize_dataset_name(dataset)
assert os.path.isdir(data_src_dir), f"{data_src_dir} does not exist"
os.makedirs(data_dst_dir, exist_ok=True)
print(f"Pre-processing {dataset} dataset...")
if dataset in ["sha", "shb"]:
_shanghaitech(data_src_dir, data_dst_dir, min_size, max_size, generate_npy)
elif dataset == "nwpu":
_nwpu(data_src_dir, data_dst_dir, min_size, max_size, generate_npy)
elif dataset == "qnrf":
_qnrf(data_src_dir, data_dst_dir, min_size, max_size, generate_npy)
else: # dataset == "jhu"
_jhu(data_src_dir, data_dst_dir, min_size, max_size, generate_npy)
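# Note: the CLI accepts long names such as "shanghaitech_a" / "ucf_qnrf";
# standardize_dataset_name (from datasets) is assumed to map them to the short
# names dispatched on above ("sha", "shb", "nwpu", "qnrf", "jhu"); any other
# value falls through to the JHU branch.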
def _resize_and_save(
image: np.ndarray,
name: str,
image_dst_dir: str,
generate_npy: bool,
label: Optional[np.ndarray] = None,
label_dst_dir: Optional[str] = None,
min_size: Optional[int] = None,
max_size: Optional[int] = None,
) -> None:
os.makedirs(image_dst_dir, exist_ok=True)
if label is not None:
assert label_dst_dir is not None, "label_dst_dir must be provided if label is provided"
os.makedirs(label_dst_dir, exist_ok=True)
image_dst_path = os.path.join(image_dst_dir, f"{name}.jpg")
if label is not None:
label_dst_path = os.path.join(label_dst_dir, f"{name}.npy")
else:
label = np.array([])
label_dst_path = None
if min_size is not None:
assert max_size is not None, f"max_size must be provided if min_size is provided, got {max_size}"
image, label, success = _resize(image, label, min_size, max_size)
if not success:
print(f"image: {image_dst_path} is not resized")
cv2.imwrite(image_dst_path, image)
if label_dst_path is not None:
np.save(label_dst_path, label)
if generate_npy:
image_npy_dst_path = os.path.join(image_dst_dir, f"{name}.npy")
image_npy = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB
image_npy = np.transpose(image_npy, (2, 0, 1)) # HWC to CHW
# Don't normalize the image. Keep it as np.uint8 to save space.
# image_npy = image_npy.astype(np.float32) / 255. # normalize to [0, 1]
np.save(image_npy_dst_path, image_npy)
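# Per sample, _resize_and_save writes:
#   <image_dst_dir>/<name>.jpg   the (possibly resized) image
#   <label_dst_dir>/<name>.npy   the point annotations as an (N, 2) array, with column 0
#                                scaled by the width ratio and column 1 by the height ratio
#   <image_dst_dir>/<name>.npy   only when generate_npy is set: the same image as a uint8, CHW, RGB array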
def _shanghaitech(
data_src_dir: str,
data_dst_dir: str,
min_size: int,
max_size: int,
generate_npy: bool = False
) -> None:
for split in ["train", "val"]:
generate_npy = generate_npy and split == "train" # only write .npy image copies for the training split
print(f"Processing {split}...")
if split == "train":
image_src_dir = os.path.join(data_src_dir, "train_data", "images")
label_src_dir = os.path.join(data_src_dir, "train_data", "ground-truth")
image_src_paths = glob(os.path.join(image_src_dir, "*.jpg"))
label_src_paths = glob(os.path.join(label_src_dir, "*.mat"))
assert len(image_src_paths) == len(label_src_paths) in [300, 400], f"Expected 300 (part_A) or 400 (part_B) images and labels, got {len(image_src_paths)} images and {len(label_src_paths)} labels"
else:
image_src_dir = os.path.join(data_src_dir, "test_data", "images")
label_src_dir = os.path.join(data_src_dir, "test_data", "ground-truth")
image_src_paths = glob(os.path.join(image_src_dir, "*.jpg"))
label_src_paths = glob(os.path.join(label_src_dir, "*.mat"))
assert len(image_src_paths) == len(label_src_paths) in [182, 316], f"Expected 182 (part_A) or 316 (part_B) images and labels, got {len(image_src_paths)} images and {len(label_src_paths)} labels"
sort_key = lambda x: int((os.path.basename(x).split(".")[0]).split("_")[-1])
image_src_paths.sort(key=sort_key)
label_src_paths.sort(key=sort_key)
image_dst_dir = os.path.join(data_dst_dir, split, "images")
label_dst_dir = os.path.join(data_dst_dir, split, "labels")
os.makedirs(image_dst_dir, exist_ok=True)
os.makedirs(label_dst_dir, exist_ok=True)
size = len(str(len(image_src_paths)))
for i, (image_src_path, label_src_path) in tqdm(enumerate(zip(image_src_paths, label_src_paths)), total=len(image_src_paths)):
image_id = int((os.path.basename(image_src_path).split(".")[0]).split("_")[-1])
label_id = int((os.path.basename(label_src_path).split(".")[0]).split("_")[-1])
assert image_id == label_id, f"Expected image id {image_id} to match label id {label_id}"
name = f"{(i + 1):0{size}d}"
image = cv2.imread(image_src_path)
label = loadmat(label_src_path)["image_info"][0][0][0][0][0]
_resize_and_save(
image=image,
label=label,
name=name,
image_dst_dir=image_dst_dir,
label_dst_dir=label_dst_dir,
generate_npy=generate_npy,
min_size=min_size,
max_size=max_size
)
if split == "train":
_generate_random_indices(len(image_src_paths), os.path.join(data_dst_dir, split))
def _nwpu(
data_src_dir: str,
data_dst_dir: str,
min_size: int,
max_size: int,
generate_npy: bool = False
) -> None:
for split in ["train", "val"]:
generate_npy = generate_npy and split == "train"
print(f"Processing {split}...")
with open(os.path.join(data_src_dir, f"{split}.txt"), "r") as f:
indices = f.read().splitlines()
indices = [idx.split(" ")[0] for idx in indices]
image_src_paths = [os.path.join(data_src_dir, f"images_part{min(5, (int(idx) - 1) // 1000 + 1)}", f"{idx}.jpg") for idx in indices]
label_src_paths = [os.path.join(data_src_dir, "mats", f"{idx}.mat") for idx in indices]
image_dst_dir = os.path.join(data_dst_dir, split, "images")
label_dst_dir = os.path.join(data_dst_dir, split, "labels")
os.makedirs(image_dst_dir, exist_ok=True)
os.makedirs(label_dst_dir, exist_ok=True)
size = len(str(len(image_src_paths)))
for i, (image_src_path, label_src_path) in tqdm(enumerate(zip(image_src_paths, label_src_paths)), total=len(image_src_paths)):
image_id = os.path.basename(image_src_path).split(".")[0]
label_id = os.path.basename(label_src_path).split(".")[0]
assert image_id == label_id, f"Expected image id {image_id} to match label id {label_id}"
name = f"{(i + 1):0{size}d}"
image = cv2.imread(image_src_path)
label = loadmat(label_src_path)["annPoints"]
_resize_and_save(
image=image,
label=label,
name=name,
image_dst_dir=image_dst_dir,
label_dst_dir=label_dst_dir,
generate_npy=generate_npy,
min_size=min_size,
max_size=max_size
)
if split == "train":
_generate_random_indices(len(image_src_paths), os.path.join(data_dst_dir, split))
# preprocess the test set
split = "test"
print(f"Processing {split}...")
with open(os.path.join(data_src_dir, f"{split}.txt"), "r") as f:
indices = f.read().splitlines()
indices = [idx.split(" ")[0] for idx in indices]
image_src_paths = [os.path.join(data_src_dir, f"images_part{min(5, (int(idx) - 1) // 1000 + 1)}", f"{idx}.jpg") for idx in indices]
image_dst_dir = os.path.join(data_dst_dir, split, "images")
os.makedirs(image_dst_dir, exist_ok=True)
for image_src_path in tqdm(image_src_paths):
image_id = os.path.basename(image_src_path).split(".")[0]
image = cv2.imread(image_src_path)
_resize_and_save(
image=image,
label=None,
name=image_id,
image_dst_dir=image_dst_dir,
label_dst_dir=None,
generate_npy=generate_npy,
min_size=min_size,
max_size=max_size
)
def _qnrf(
data_src_dir: str,
data_dst_dir: str,
min_size: int,
max_size: int,
generate_npy: bool = False
) -> None:
for split in ["train", "val"]:
generate_npy = generate_npy and split == "train"
print(f"Processing {split}...")
if split == "train":
image_src_dir = os.path.join(data_src_dir, "Train")
label_src_dir = os.path.join(data_src_dir, "Train")
image_src_paths = glob(os.path.join(image_src_dir, "*.jpg"))
label_src_paths = glob(os.path.join(label_src_dir, "*.mat"))
assert len(image_src_paths) == len(label_src_paths) == 1201, f"Expected 1201 images and labels, got {len(image_src_paths)} images and {len(label_src_paths)} labels"
else:
image_src_dir = os.path.join(data_src_dir, "Test")
label_src_dir = os.path.join(data_src_dir, "Test")
image_src_paths = glob(os.path.join(image_src_dir, "*.jpg"))
label_src_paths = glob(os.path.join(label_src_dir, "*.mat"))
assert len(image_src_paths) == len(label_src_paths) == 334, f"Expected 334 images and labels, got {len(image_src_paths)} images and {len(label_src_paths)} labels"
sort_key = lambda x: int((os.path.basename(x).split(".")[0]).split("_")[1])
image_src_paths.sort(key=sort_key)
label_src_paths.sort(key=sort_key)
image_dst_dir = os.path.join(data_dst_dir, split, "images")
label_dst_dir = os.path.join(data_dst_dir, split, "labels")
os.makedirs(image_dst_dir, exist_ok=True)
os.makedirs(label_dst_dir, exist_ok=True)
size = len(str(len(image_src_paths)))
for i, (image_src_path, label_src_path) in tqdm(enumerate(zip(image_src_paths, label_src_paths)), total=len(image_src_paths)):
image_id = int((os.path.basename(image_src_path).split(".")[0]).split("_")[1])
label_id = int((os.path.basename(label_src_path).split(".")[0]).split("_")[1])
assert image_id == label_id, f"Expected image id {image_id} to match label id {label_id}"
name = f"{(i + 1):0{size}d}"
image = cv2.imread(image_src_path)
label = loadmat(label_src_path)["annPoints"]
_resize_and_save(
image=image,
label=label,
name=name,
image_dst_dir=image_dst_dir,
label_dst_dir=label_dst_dir,
generate_npy=generate_npy,
min_size=min_size,
max_size=max_size
)
if split == "train":
_generate_random_indices(len(image_src_paths), os.path.join(data_dst_dir, split))
def _jhu(
data_src_dir: str,
data_dst_dir: str,
min_size: int,
max_size: int,
generate_npy: bool = False
) -> None:
for split in ["train", "val"]:
generate_npy = generate_npy and split == "train"
if split == "train":
with open(os.path.join(data_src_dir, "train", "image_labels.txt"), "r") as f:
train_names = f.read().splitlines()
train_names = [name.split(",")[0] for name in train_names]
train_image_src_paths = [os.path.join(data_src_dir, "train", "images", f"{name}.jpg") for name in train_names]
train_label_src_paths = [os.path.join(data_src_dir, "train", "gt", f"{name}.txt") for name in train_names]
with open(os.path.join(data_src_dir, "val", "image_labels.txt"), "r") as f:
val_names = f.read().splitlines()
val_names = [name.split(",")[0] for name in val_names]
val_image_src_paths = [os.path.join(data_src_dir, "val", "images", f"{name}.jpg") for name in val_names]
val_label_src_paths = [os.path.join(data_src_dir, "val", "gt", f"{name}.txt") for name in val_names]
image_src_paths = train_image_src_paths + val_image_src_paths
label_src_paths = train_label_src_paths + val_label_src_paths
else:
with open(os.path.join(data_src_dir, "test", "image_labels.txt"), "r") as f:
test_names = f.read().splitlines()
test_names = [name.split(",")[0] for name in test_names]
image_src_paths = [os.path.join(data_src_dir, "test", "images", f"{name}.jpg") for name in test_names]
label_src_paths = [os.path.join(data_src_dir, "test", "gt", f"{name}.txt") for name in test_names]
image_dst_dir = os.path.join(data_dst_dir, split, "images")
label_dst_dir = os.path.join(data_dst_dir, split, "labels")
os.makedirs(image_dst_dir, exist_ok=True)
os.makedirs(label_dst_dir, exist_ok=True)
size = len(str(len(image_src_paths)))
for i, (image_src_path, label_src_path) in tqdm(enumerate(zip(image_src_paths, label_src_paths)), total=len(image_src_paths)):
image_id = int(os.path.basename(image_src_path).split(".")[0])
label_id = int(os.path.basename(label_src_path).split(".")[0])
assert image_id == label_id, f"Expected image id {image_id} to match label id {label_id}"
name = f"{(i + 1):0{size}d}"
image = cv2.imread(image_src_path)
with open(label_src_path, "r") as f:
label = f.read().splitlines()
label = np.array([list(map(float, line.split(" ")[0: 2])) for line in label])
_resize_and_save(
image=image,
label=label,
name=name,
image_dst_dir=image_dst_dir,
label_dst_dir=label_dst_dir,
generate_npy=generate_npy,
min_size=min_size,
max_size=max_size
)
if split == "train":
_generate_random_indices(len(image_src_paths), os.path.join(data_dst_dir, split))
def parse_args():
parser = ArgumentParser(description="Pre-process datasets to resize images and labels to a given size range.")
parser.add_argument(
"--dataset",
type=str,
choices=["nwpu", "ucf_qnrf", "jhu", "shanghaitech_a", "shanghaitech_b"],
required=True,
help="The dataset to pre-process."
)
parser.add_argument("--src_dir", type=str, required=True, help="The root directory of the source dataset.")
parser.add_argument("--dst_dir", type=str, required=True, help="The root directory of the destination dataset.")
parser.add_argument("--min_size", type=int, default=256, help="The minimum size of the shorter side of the image.")
parser.add_argument("--max_size", type=int, default=None, help="The maximum size of the longer side of the image.")
parser.add_argument("--generate_npy", action="store_true", help="Generate .npy files for images.")
args = parser.parse_args()
args.src_dir = os.path.abspath(args.src_dir)
args.dst_dir = os.path.abspath(args.dst_dir)
args.max_size = float("inf") if args.max_size is None else args.max_size
return args
if __name__ == "__main__":
args = parse_args()
_preprocess(
dataset=args.dataset,
data_src_dir=args.src_dir,
data_dst_dir=args.dst_dir,
min_size=args.min_size,
max_size=args.max_size,
generate_npy=args.generate_npy
)
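# Example invocation (illustrative paths; the source directory must follow the
# layout expected by the corresponding helper above):
#   python preprocess.py --dataset shanghaitech_a --src_dir /path/to/ShanghaiTech/part_A \
#       --dst_dir ./data/sha --min_size 256 --max_size 2048 --generate_npy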