# config.py
import torch
# CLI overrides are currently disabled; the module-level constants below are used instead.
# import argparse
# parser = argparse.ArgumentParser(description='Script Description')
# # File Paths
# parser.add_argument('--images_folder', type=str, default='Dataset/jpg', help='Path to the folder containing images')
# parser.add_argument('--annotation_file', type=str, default='Dataset/imagelabels.mat', help='Path to the annotation file')
# parser.add_argument('--txt_results', type=str, default='logs/logs.txt', help='Path to the text results file')
# parser.add_argument('--logfile', type=str, default='logs/model.pt', help='Path to the log file')
# parser.add_argument('--csv_dataset_file', type=str, default='Dataset/dataset.csv', help='Path to the CSV dataset file')
# parser.add_argument('--cat_to_name_file', type=str, default='Dataset/cat_to_name.json', help='Path to the JSON file mapping category to name')
# parser.add_argument('--checkpoint_file', type=str, default='logs/checkpoints.pt', help='Path to the checkpoint file')
# parser.add_argument('--train_file', type=str, default='Dataset/train_dataset.csv', help='Path to the train dataset file')
# parser.add_argument('--valid_pairs_file', type=str, default='Dataset/val_augmented_pair.csv', help='Path to the validation pairs file')
# parser.add_argument('--test_pairs_file', type=str, default='Dataset/test_augmented_pair.csv', help='Path to the test pairs file')
# parser.add_argument('--data_folder', type=str, default='Dataset/jpg', help='Path to the folder containing data')
# # Device and Random State
# parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu', help='Device to use (cuda or cpu)')
# parser.add_argument('--random_state', type=int, default=47, help='Random seed for reproducibility')
# # Test Size and Image Size
# parser.add_argument('--test_size', type=float, default=0.2, help='Proportion of dataset to include in the test split')
# parser.add_argument('--image_size', type=int, nargs=2, default=[150, 150], help='Image size (width, height) for resizing')
# # Number of Retrieves
# parser.add_argument('--number_of_retrieves', type=int, default=50, help='Number of images to retrieve in retrieval task')
# # Hyperparameters
# parser.add_argument('--batch_size', type=int, default=128, help='Batch size for training')
# parser.add_argument('--learning_rate', type=float, default=0.001, help='Learning rate for training')
# parser.add_argument('--num_epochs', type=int, default=400, help='Number of epochs for training')
# # Center Loss Hyperparameters
# parser.add_argument('--center_learning_rate', type=float, default=0.3, help='Learning rate for center loss')
# parser.add_argument('--num_classes', type=int, default=82, help='Number of classes in the dataset')
# parser.add_argument('--alpha', type=float, default=1, help='Weight for center loss')
# parser.add_argument('--feature_dim', type=int, default=2048, help='Feature dimension for center loss')
# parser.add_argument('--lambda_', type=int, default=10, help='Weight for center loss regularization')
# args = parser.parse_args()
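# A minimal sketch of how these CLI flags could override the constants below,
# assuming the parser setup and arguments above are uncommented first
# (illustrative only, not active code):
#
#   args = parser.parse_args()
#   BATCH_SIZE = args.batch_size
#   LEARNING_RATE = args.learning_rate
#   NUM_EPOCHS = args.num_epochs

# File paths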
IMAGES_FOLDER = 'Dataset/jpg'
ANNOTATION_FILE = 'Dataset/imagelabels.mat'
TXT_RESULTS = 'logs/logs.txt'
LOGFILE = 'logs/model.pt'
CSV_DATASET_FILE = 'Dataset/dataset.csv'
CAT_TO_NAME_FILE = 'Dataset/cat_to_name.json'
CHECKPOINT_FILE = 'logs/checkpoints.pt'
TRAIN_FILE = 'Dataset/train_dataset.csv'
VALID_PAIRS_FILE = 'Dataset/val_augmented_pair.csv'
TEST_PAIRS_FILE = 'Dataset/test_augmented_pair.csv'
DATA_FOLDER = 'Dataset/jpg'
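# Device and random state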
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# DEVICE = 'cuda'  # force GPU
# DEVICE = 'cpu'   # force CPU
RANDOM_STATE = 47
TEST_SIZE = 0.2
IMAGE_SIZE = (128, 128)  # target (width, height) for resizing images
NUMBER_OF_RETRIEVES = 50
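# Illustrative sketch (not active code): IMAGE_SIZE could feed a preprocessing
# pipeline, assuming torchvision is available in this project:
#
#   from torchvision import transforms
#   preprocess = transforms.Compose([
#       transforms.Resize(IMAGE_SIZE),
#       transforms.ToTensor(),
#   ])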
# Training hyperparameters
BATCH_SIZE = 256
LEARNING_RATE = 0.005
NUM_EPOCHS = 100
# Loss hyperparameters
CENTER_LEARNING_RATE = 0.3  # learning rate for the center loss
NUM_CLASSES = 82            # number of classes in the dataset
ALPHA = 1                   # weight for the center loss
FEATURE_DIM = 8192          # feature dimension for the center loss
LAMBDA_C = 0.01             # weight for the center-loss regularization
MARGIN = 0.5
SCALE = 30.0
# Model configuration
EFFICIENT_NETV2_IN_FEATURES = 1280  # in_features of the EfficientNetV2 classifier head
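
# Example usage from a training script (illustrative sketch; `model` and `dataset`
# are assumed to be defined by the caller, not by this module):
#
#   import config
#   model = model.to(config.DEVICE)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=config.BATCH_SIZE, shuffle=True)
#   optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)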