"""
Module: VSLAM-LAB - vslamlab.py
- Author: Alejandro Fontan Villacampa
- Version: 1.0
- Created: 2024-07-04
- Updated: 2024-07-04
- License: GPLv3 License
- List of Known Dependencies:
* ...
"""
import argparse
import glob
import os
import sys
import time
import shutil
import re
import yaml
from inputimeout import inputimeout, TimeoutOccurred
from Evaluate import compare_functions
from Datasets.dataset_utilities import get_dataset
from Baselines.baseline_utilities import get_baseline
from path_constants import (COMPARISONS_YAML_DEFAULT, CONFIG_DEFAULT, EXP_YAML_DEFAULT,
                            VSLAMLAB_BENCHMARK, VSLAMLAB_EVALUATION, VSLAM_LAB_DIR, VSLAMLAB_BASELINES)
from utilities import Experiment, check_yaml_file_integrity, list_datasets, ws, show_time
from Run.run_functions import run_sequence
from Evaluate.evaluate_functions import evaluate_sequence

SCRIPT_LABEL = f"\033[95m[{os.path.basename(__file__)}]\033[0m "

def main():
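    """Entry point: parse the command line, prepare folders, and dispatch download/run/evaluate/compare."""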
    # Parse inputs
    parser = argparse.ArgumentParser(description=f"{__file__}")

    parser.add_argument('--exp_yaml', nargs='?', type=str,
                        const=EXP_YAML_DEFAULT, default=EXP_YAML_DEFAULT,
                        help=f"Path to the YAML file containing the list of experiments. "
                             f"Default: 'vslamlab --exp_yaml {EXP_YAML_DEFAULT}'")
    parser.add_argument('-download', action='store_true',
                        help="Download the datasets. If none of 'download/run/evaluate/compare' is activated, "
                             "all of them run by default.")
    parser.add_argument('-run', action='store_true', help="Run the experiments.")
    parser.add_argument('-evaluate', action='store_true', help="Evaluate the experiments.")
    parser.add_argument('-compare', action='store_true', help="Compare the evaluated experiments.")
    parser.add_argument('--list_datasets', action='store_true', help="List available datasets.")
    parser.add_argument('-ablation', action='store_true', help="Run and evaluate in ablation mode.")

    args = parser.parse_args()
    if not os.path.exists(VSLAMLAB_EVALUATION):
        os.makedirs(VSLAMLAB_EVALUATION, exist_ok=True)

    # Stuff to run demo
    if not os.path.exists(os.path.join(VSLAMLAB_EVALUATION, "exp_demo_dso")):
        shutil.copytree(os.path.join(VSLAM_LAB_DIR, "docs", "exp_demo_dso"),
                        os.path.join(VSLAMLAB_EVALUATION, "exp_demo_dso"))
    if not os.path.exists(os.path.join(VSLAMLAB_EVALUATION, "exp_demo_orbslam2")):
        shutil.copytree(os.path.join(VSLAM_LAB_DIR, "docs", "exp_demo_orbslam2"),
                        os.path.join(VSLAMLAB_EVALUATION, "exp_demo_orbslam2"))

    print(f"\n{SCRIPT_LABEL}Created folder to store data: {VSLAMLAB_BENCHMARK}")
    print(f"{SCRIPT_LABEL}Created folder to store evaluation: {VSLAMLAB_EVALUATION}")

    # Info commands
    if args.list_datasets:
        print_datasets()
        return

    # Load experiment info
    experiments, config_files = load_experiments(args.exp_yaml)
    check_config_integrity(config_files)

    # Process experiments
    filter_inputs(args)
    if args.download:
        download(config_files)
    if args.run:
        run(experiments, args.exp_yaml, args.ablation)
    if args.evaluate:
        evaluate(experiments, args.ablation)
    if args.compare:
        compare(experiments, args.exp_yaml)


def filter_inputs(args):
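    """If no stage flag was given, enable all stages: download, run, evaluate, and compare."""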
    if not args.download and not args.run and not args.evaluate and not args.compare:
        args.download = True
        args.run = True
        args.evaluate = True
        args.compare = True


def check_parameter_for_relative_path(parameter_value):
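    """Replace a path prefix ending in 'VSLAM-LAB' with the local VSLAM_LAB_DIR (also after a ':' separator)."""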
if "VSLAM-LAB" in parameter_value:
if ":" in parameter_value:
return re.sub(r'(?<=:)[^:]*VSLAM-LAB', VSLAM_LAB_DIR, str(parameter_value))
return re.sub(r'^.*VSLAM-LAB', VSLAM_LAB_DIR, str(parameter_value))
return parameter_value
def load_experiments(exp_yaml):
"""
Loads experiment configurations from a YAML file and initializes Experiment objects.
Parameters
----------
exp_yaml : str
Path to the YAML file containing experiment settings (default: VSLAM-LAB/docs/experimentList.yaml).
Returns
----------
experiments : dict
experiments<exp_name,Experiment()>
config_files : dict
config_files<config_yaml,False>
"""
check_yaml_file_integrity(exp_yaml)
with open(exp_yaml, 'r') as file:
experiment_data = yaml.safe_load(file)
experiments = {}
config_files = {}
for exp_name, settings in experiment_data.items():
experiment = Experiment()
active = settings.get('Active', True)
if not active:
continue
experiments[exp_name] = experiment
experiments[exp_name].config_yaml = os.path.join(VSLAM_LAB_DIR, 'configs',
settings.get('Config', CONFIG_DEFAULT))
config_files[experiments[exp_name].config_yaml] = False
experiments[exp_name].folder = os.path.join(VSLAMLAB_EVALUATION, exp_name)
experiments[exp_name].num_runs = settings.get('NumRuns', 1)
experiments[exp_name].module = settings.get('Module', "default")
experiments[exp_name].parameters = settings['Parameters']
#if settings['Parameters']:
# for parameter_name in settings['Parameters']:
# experiments[exp_name].parameters.append(
# check_parameter_for_relative_path(settings['Parameters'][parameter_name]))
print(f"\n{SCRIPT_LABEL}Experiment summary: {os.path.basename(exp_yaml)}")
print(f"{ws(4)} Number of experiments: {len(experiments)}")
#print(f"{ws(4)} Estimated data size: - ")
#run_time = estimate_experiments_time(experiments)
#if run_time > 60.0:
# print(f"{ws(4)} Estimated running time: {run_time/60.0} (h)")
#else:
# print(f"{ws(4)} Estimated running time: {run_time} (min)")
return experiments, config_files
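# Illustrative layout of an experiment entry consumed by load_experiments(); only the keys
# Active/Config/NumRuns/Module/Parameters are read above, and the values shown are hypothetical:
#
#   exp_demo_dso:
#     Module: "dso"
#     Config: "config_demo.yaml"
#     NumRuns: 5
#     Parameters: {}
#     Active: True

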
def compare(experiments, exp_yaml):
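    """(Re)create the comparison folder for this experiment list and run the full comparison."""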
    comparison_path = os.path.join(VSLAMLAB_EVALUATION,
                                   f"comp_{str(os.path.basename(exp_yaml)).replace('.yaml', '')}")
    print(f"\n{SCRIPT_LABEL}Create folder to save comparison: {comparison_path}")
    print(f"\n{SCRIPT_LABEL}Comparing (in {comparison_path}) ...")

    if os.path.exists(comparison_path):
        shutil.rmtree(comparison_path)
    os.makedirs(comparison_path)
    os.makedirs(os.path.join(comparison_path, 'figures'))

    compare_functions.full_comparison(experiments, VSLAMLAB_BENCHMARK, COMPARISONS_YAML_DEFAULT, comparison_path)


def evaluate(experiments, ablation=False):
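    """Evaluate every sequence of every experiment listed in its config YAML."""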
print(f"\n{SCRIPT_LABEL}Evaluating (in {VSLAMLAB_EVALUATION}) ...")
for [_, exp] in experiments.items():
with open(exp.config_yaml, 'r') as file:
config_file_data = yaml.safe_load(file)
for dataset_name, sequence_names in config_file_data.items():
dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
for sequence_name in sequence_names:
evaluate_sequence(exp, dataset, sequence_name, ablation)
def run(experiments, exp_yaml, ablation=False):
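    """Run each experiment until every sequence has 'num_runs' system outputs, printing a progress report per pass."""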
print(f"\n{SCRIPT_LABEL}Running experiments (in {exp_yaml}) ...")
start_time = time.time()
num_executed_iterations = 0
duration_time_total = 0
duration_time_average = 0
remaining_iterations = 0
while True:
experiments_ = {}
for [exp_name, exp] in experiments.items():
remaining_iterations = 0
baseline = get_baseline(exp.module)
with open(exp.config_yaml, 'r') as file:
config_file_data = yaml.safe_load(file)
for dataset_name, sequence_names in config_file_data.items():
dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
for sequence_name in sequence_names:
sequence_folder = os.path.join(exp.folder, dataset_name.upper(), sequence_name)
num_system_output_files = 0
if os.path.exists(sequence_folder):
search_pattern = os.path.join(sequence_folder, f'*system_output_*')
num_system_output_files = len(glob.glob(search_pattern))
remaining_iterations_seq = exp.num_runs - num_system_output_files
remaining_iterations += remaining_iterations_seq
if num_system_output_files < exp.num_runs:
exp_it = num_system_output_files
duration_time = run_sequence(exp_it, exp, baseline, dataset, sequence_name, ablation)
duration_time_total += duration_time
num_executed_iterations += 1
remaining_iterations -= 1
#duration_time_average = duration_time_total / num_executed_iterations
#remaining_time += (remaining_iterations_seq - 1) * duration_time_average
if remaining_iterations > 0:
experiments_[exp_name] = exp
if len(experiments_) == 0:
break
experiments = experiments_
duration_time_average = duration_time_total / num_executed_iterations
remaining_time = remaining_iterations * duration_time_average
if remaining_time > 1:
print(f"\n{SCRIPT_LABEL}: Experiment report")
print(f"{ws(4)}\033[93mNumber of executed iterations: {num_executed_iterations} / {num_executed_iterations + remaining_iterations} \033[0m")
print(f"{ws(4)}\033[93mNumber of remaining iterations: {remaining_iterations}\033[0m")
print(f"{ws(4)}\033[93mTotal time consumed: {show_time(duration_time_total)}\033[0m")
print(f"{ws(4)}\033[93mAverage time per iteration: {show_time(duration_time_average)}\033[0m")
print(f"{ws(4)}\033[93mRemaining time until completion: {show_time(remaining_time)}\033[0m")
run_time = (time.time() - start_time)
print(f"\033[93m[Experiment runtime: {show_time(run_time)}]\033[0m")
def download(config_files):
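    """Check for download issues, try to solve them, then download every sequence in the config files."""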
    download_issues = find_download_issues(config_files)
    solve_download_issues(download_issues)

    print(f"\n{SCRIPT_LABEL}Downloading (to {VSLAMLAB_BENCHMARK}) ...")
    for config_file in config_files:
        with open(config_file, 'r') as file:
            config_file_data = yaml.safe_load(file)
        for dataset_name, sequence_names in config_file_data.items():
            dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
            for sequence_name in sequence_names:
                dataset.download_sequence(sequence_name)


def check_config_integrity(config_files):
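    """Verify that every dataset and sequence referenced in the config files exists; exit with an error otherwise."""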
    dataset_list = list_datasets()
    for config_file in config_files:
        check_yaml_file_integrity(config_file)
        with open(config_file, 'r') as file:
            config_file_data = yaml.safe_load(file)
        for dataset_name, sequence_names in config_file_data.items():
            if dataset_name not in dataset_list:
                print(f"\n{SCRIPT_LABEL}Error in: {config_file}")
                print(f"{ws(4)}'{dataset_name}' dataset doesn't exist")
                print_datasets()
                sys.exit(1)
            dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
            for sequence_name in sequence_names:
                if not dataset.contains_sequence(sequence_name):
                    print(f"\n{SCRIPT_LABEL}Error in: {config_file}")
                    print(f"{ws(4)}'{dataset_name}' dataset doesn't contain sequence '{sequence_name}'")
                    print(f"\nAvailable sequences are: {dataset.sequence_names}")
                    print("")
                    sys.exit(1)


def print_datasets():
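    """Print the datasets accessible in VSLAM-LAB."""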
    dataset_list = list_datasets()
    print(f"\n{SCRIPT_LABEL}Accessible datasets in VSLAM-LAB:")
    for dataset in dataset_list:
        print(f" - {dataset}")
    print("")


def estimate_experiments_time(experiments):
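    """Return a rough running-time estimate in minutes: 1.5x the total sequence playback time over all runs."""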
    running_time = 0
    for exp_name, exp in experiments.items():
        with open(exp.config_yaml, 'r') as file:
            config_file_data = yaml.safe_load(file)
        for dataset_name, sequence_names in config_file_data.items():
            dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
            for sequence_name in sequence_names:
                num_frames = dataset.get_sequence_num_rgb(sequence_name)
                running_time += exp.num_runs * num_frames / dataset.rgb_hz
    return 1.5 * running_time / 60


def find_download_issues(config_files):
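    """Collect download issues per dataset and, if any are found, ask the user whether to continue."""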
    download_issues = {}
    for config_file in config_files:
        with open(config_file, 'r') as file:
            config_file_data = yaml.safe_load(file)
        for dataset_name, sequence_names in config_file_data.items():
            dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
            download_issues[dataset.dataset_name] = {}
            for sequence_name in sequence_names:
                sequence_availability = dataset.check_sequence_availability(sequence_name)
                if sequence_availability != "available":
                    issues_seq = dataset.get_download_issues(sequence_name)
                    if issues_seq == {}:
                        continue
                    for issue_name, issue_topic in issues_seq.items():
                        download_issues[dataset.dataset_name][issue_name] = issue_topic

    print(f"\n{SCRIPT_LABEL}Finding download issues...")
    num_download_issues = 0
    for dataset_name, issues_dataset in download_issues.items():
        for issue_name, issue_topic in issues_dataset.items():
            print(f"{ws(4)}[{dataset_name}][{issue_name}]: {issue_topic}")
            num_download_issues += 1

    if num_download_issues > 0:
        message = (f"\n{SCRIPT_LABEL}Found download issues: your experiments have {num_download_issues} download "
                   f"issues. Would you like to continue, solving them and downloading the datasets? (Y/n): ")
        try:
            user_input = inputimeout(prompt=message, timeout=120).strip().upper()
        except TimeoutOccurred:
            user_input = 'Y'
            print(" No input detected. Defaulting to 'Y'.")
        if user_input != 'Y':
            exit()
    else:
        message = f"{ws(4)}Found no download issues in your experiments."
        print(message)
        download_issues = {}

    return download_issues


def solve_download_issues(download_issues):
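    """Hand each collected download issue to its dataset's solve_download_issue()."""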
    if download_issues == {}:
        return

    print(f"\n{SCRIPT_LABEL}Solving download issues: ")
    for dataset_name, issues_dataset in download_issues.items():
        dataset = get_dataset(dataset_name, VSLAMLAB_BENCHMARK)
        for download_issue in issues_dataset.items():
            dataset.solve_download_issue(download_issue)


if __name__ == "__main__":
    main()