# train_deepfashion2_general_labels.py
if __name__ == "__main__":
    import os
    import warnings

    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter

    # torch.backends.cudnn.enabled = False
    warnings.filterwarnings("ignore")

    from models import *
    from datasets import *
    from train import *

    seed = None  # random.randint(0, 1024)
    # torch.manual_seed(seed)
    image_size = (226, 300)
    scale_range = (0.3, 1.2)
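    # NOTE: scale_range is forwarded to train_DeepFashion2 below; it presumably
    # controls random rescaling of the inputs during training (validation runs
    # without it).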
    # datasets
    train_dataset = DeepFashion2DatasetGeneral(
        image_dir='../deepfashion2/train/image',
        anno_dir='../deepfashion2/train/annos',
        output_size=image_size,
        return_bbox=True,
    )
    val_dataset = DeepFashion2DatasetGeneral(
        image_dir='../deepfashion2/validation/image',
        anno_dir='../deepfashion2/validation/annos',
        output_size=image_size,
        return_bbox=True,
    )
    # train_dataset, val_dataset, test_dataset = random_split(full_dataset, [train_size, val_size, test_size], seed=0)
    train_dataset = AugmentedDeepFashion2Dataset(
        dataset_source=train_dataset, output_size=image_size,
        flip_prob=0.5, crop_ratio=(1, 1), scale_factor=(0.8, 1.1),
        noise_level=(0, 1), blur_radius=(0, 3), brightness_factor=(0.85, 1.25),
        seed=seed,
    )
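    # NOTE: the flip/crop/scale/noise/blur/brightness arguments above are ranges,
    # presumably sampled per image by AugmentedDeepFashion2Dataset; the exact
    # sampling scheme lives in datasets.py.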
    # replace with augmented dataset
    # val_dataset = AugmentedDataset(
    #     dataset_source=val_dataset, output_size=image_size,
    #     flip_prob=0.5, crop_ratio=(1, 1), scale_factor=(1, 1),
    #     noise_level=(0, 0), blur_radius=(0, 0), brightness_factor=(1, 1),
    #     seed=seed,
    # )
    # dataloaders
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True, num_workers=8,
                              collate_fn=collate_fn_DeepFashion2)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=8,
                            collate_fn=collate_fn_DeepFashion2)
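    # NOTE: validation uses batch_size=1, presumably because samples carry a
    # variable number of instances and the eval path scores one image at a time.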
    # choose device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # model
    num_classes = len(DeepFashion2Dataset.categories)
    model = SegmentPredictorBbox(num_masks=num_classes + 4, num_labels=num_classes + 4, num_bbox_classes=4)
    model.to(device)
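    # NOTE: the 4 extra mask/label channels on top of the DeepFashion2 categories
    # presumably correspond to the 4 bbox classes handled by SegmentPredictorBbox;
    # see models.py for the actual channel layout.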
    # losses and optimizer
    criterion_mask = nn.BCELoss()
    criterion_pred = nn.BCELoss()
    criterion_bbox = nn.L1Loss(reduction='none')
    optimizer = optim.Adam(model.parameters(), lr=1e-4)
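    # NOTE: nn.BCELoss expects probabilities in [0, 1], so the model's mask and
    # label heads are assumed to end in a sigmoid; reduction='none' on the bbox L1
    # loss suggests per-element masking/weighting inside train_DeepFashion2.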
    # early stopping params
    early_stopping_patience = 5
    early_stopping_counter = 0
    # check model saving dir
    model_dir = "deepfashion2-segpredbbox-general-small"
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # Check for the latest saved model
    latest_checkpoint = find_latest_checkpoint(model_dir)
    if latest_checkpoint:
        print(f"Loading model from {latest_checkpoint}")
        model, optimizer, start_epoch, best_acc, counter = load_model(model, optimizer, path=latest_checkpoint)
        start_epoch += 1
        counter += 1
    else:
        start_epoch = 0
        counter = 0
        best_acc = 0.0
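    # NOTE: counter appears to be a global step index that train_DeepFashion2
    # advances and uses for per-batch TensorBoard logging.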
    # TensorBoard writer
    writer = SummaryWriter(model_dir)
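    # NOTE: pointing SummaryWriter at model_dir keeps the TensorBoard event files
    # and the model checkpoints together in one run directory.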
    # train loop
    num_epochs = 60
    for epoch in range(start_epoch, num_epochs):
        print(f'Epoch {epoch + 1}/{num_epochs}')
        print('-' * 10)
        train_loss, mask_train_loss, pred_train_loss, train_mAP, train_f1, counter = train_DeepFashion2(
            model, optimizer, train_loader,
            criterion_mask, criterion_pred, criterion_bbox,
            scale_range, epoch, device, writer, counter,
        )
        val_loss, mask_val_loss, pred_val_loss, avrg_mAP, avrg_f1 = val_DeepFashion2(
            model, val_loader,
            criterion_mask, criterion_pred, criterion_bbox,
            epoch, device,
        )
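        # NOTE: only the validation metrics are logged below; the training-side
        # mAP/F1 returned above are currently unused.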
        # write to TensorBoard
        # writer.add_scalar('Loss/Train', train_loss, epoch)
        writer.add_scalar('Loss/Validation', val_loss, epoch)
        # writer.add_scalar('LossMask/Train', mask_train_loss, epoch)
        writer.add_scalar('LossMask/Validation', mask_val_loss, epoch)
        # writer.add_scalar('LossPred/Train', pred_train_loss, epoch)
        writer.add_scalar('LossPred/Validation', pred_val_loss, epoch)
        # NOTE: val_DeepFashion2 does not return a separate bbox loss, so this
        # scalar currently duplicates pred_val_loss under the LossBbox tag.
        writer.add_scalar('LossBbox/Validation', pred_val_loss, epoch)
        writer.add_scalar('MAP/Validation', avrg_mAP, epoch)
        writer.add_scalar('F1/Validation', avrg_f1, epoch)
        val_acc = (avrg_f1 + avrg_mAP) / 2
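        # val_acc is the unweighted mean of validation mAP and F1; it drives both
        # checkpoint selection and early stopping below.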
        # train, validate, test
        # train_loss, mask_train_loss, pred_train_loss, avrg_mAP, avrg_f1, avrg_iou, counter = train_DeepFashion2(model, optimizer, train_loader, scale_range, epoch, device, tb_writer=writer, counter=counter)
        # val_loss, mask_val_loss, pred_val_loss, val_avrg_mAP, val_avrg_f1, val_avrg_iou = val_DeepFashion2(model, val_loader, (1, 1), epoch, device)
        # # write to TensorBoard
        # val_acc = 0.333 * val_avrg_mAP + 0.333 * val_avrg_f1 + 0.333 * val_avrg_iou
        # writer.add_scalar('Loss/Validation', val_loss, epoch)
        # writer.add_scalar('LossMask/Validation', mask_val_loss, epoch)
        # writer.add_scalar('LossPred/Validation', pred_val_loss, epoch)
        # writer.add_scalar('Accuracy/Validation', val_acc, epoch)
        # writer.add_scalar('MAP/Validation', val_avrg_mAP, epoch)
        # writer.add_scalar('F1/Validation', val_avrg_f1, epoch)
        # writer.add_scalar('IOU/Validation', val_avrg_iou, epoch)
        # save the model
        if val_acc >= best_acc:
            print(f"Accuracy increased ({best_acc:.2f} --> {val_acc:.2f}). Saving model ...")
            save_model(epoch, model, optimizer, val_acc, path=f"{model_dir}/model_epoch_{epoch}.pth", counter=counter)
            best_acc = val_acc
            early_stopping_counter = 0
        else:
            early_stopping_counter += 1
            print(f"Validation accuracy did not improve. "
                  f"EarlyStopping counter: {early_stopping_counter}/{early_stopping_patience}")
        if early_stopping_counter >= early_stopping_patience:
            print("Early stopping")
            break
    writer.close()
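    # To inspect the logged curves while training, assuming a default TensorBoard
    # install:
    #   tensorboard --logdir deepfashion2-segpredbbox-general-small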