-
Notifications
You must be signed in to change notification settings - Fork 0
/
main_models_viewer.py
59 lines (42 loc) · 1.43 KB
/
main_models_viewer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import pandas as pd
import ast
''' DESCRIPTION '''
''' This is the file where we can visualize the final results '''
from termcolor import colored
def select_scores(models):
    """Print every model's position alongside its raw score dict."""
    position = 0
    for entry in models["Scores"]:
        print(position, entry)
        position += 1
def select_best_precisions(models):
    """Print a dict of model index -> precision, highest precision first."""
    by_index = {idx: entry['precision']
                for idx, entry in enumerate(models["Scores"])}
    ranked = dict(sorted(by_index.items(), key=lambda kv: kv[1], reverse=True))
    print(ranked)
def select_best_recalls(models):
    """Print a dict of model index -> recall, highest recall first."""
    pairs = [(idx, entry['recall']) for idx, entry in enumerate(models["Scores"])]
    # list.sort is stable, so ties keep their original index order,
    # matching a value-sort over the dict items.
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    print(dict(pairs))
def select_best_f1s(models):
    """Print a dict of model index -> f1, highest f1 first."""
    unordered = {}
    for position, entry in enumerate(models["Scores"]):
        unordered[position] = entry['f1']
    # Rebuild the dict with keys ordered by descending f1 value.
    ordered = {key: unordered[key]
               for key in sorted(unordered, key=unordered.get, reverse=True)}
    print(ordered)
# --- Script entry: load the saved model-selection results and print them ---
try:
    models = pd.read_csv("selected_models/models.csv")
except FileNotFoundError as err:
    # Catch only the missing-file case (the original bare `except:` swallowed
    # every error) and re-raise an instance with a helpful message, chaining
    # the original exception instead of discarding it.
    raise FileNotFoundError(
        "selected_models/models.csv not found - run model selection first"
    ) from err

if models.empty:
    print(colored("No models found", "red"))
else:
    # "Scores" is stored as a stringified dict per row; parse it back safely
    # (literal_eval only accepts Python literals, never arbitrary code).
    models["Scores"] = [ast.literal_eval(i) for i in models["Scores"]]
    select_scores(models)
    select_best_precisions(models)
    select_best_recalls(models)
    select_best_f1s(models)
    # When you want to analyze a specific experiment, change this index.
    experiment_index = 6
    # Guard the hard-coded index so a short results table doesn't crash.
    if experiment_index < len(models):
        print(models.iloc[experiment_index])