train.py
"""
Description:
Achieves:
- Data Preprocessing over Ninapro DataBase5
- Training finetune-base model (Saving weights along the way)
- Visualize training logs (model accuracy and loss during training)
Author: Jimmy L. @ SF State MIC Lab
Date: Summer 2022
"""
from dataset import *
from model import *
import config
import numpy as np
import tensorflow as tf
if __name__ == "__main__":
    # NOTE: Check whether a GPU device is available
    print(tf.config.list_physical_devices('GPU'))
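    # Optional (assumption, not part of the original script): enable memory
    # growth so TensorFlow allocates GPU memory on demand instead of
    # reserving it all up front.
    # for gpu in tf.config.list_physical_devices('GPU'):
    #     tf.config.experimental.set_memory_growth(gpu, True)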
    # NOTE: Data preprocessing
    # Get sEMG samples and labels (shape: [num_samples, 8], one column per sensor/channel)
    emg, label = folder_extract(
        config.folder_path,
        exercises=config.exercises,
        myo_pref=config.myo_pref
    )
    # Apply standardization to the data; save the dataset's mean and standard deviation to JSON
    emg = standarization(emg, config.std_mean_path)
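    # standarization() is assumed to z-score the signals, i.e.
    # x_norm = (x - mean) / std, with the mean/std computed over this dataset
    # and written to config.std_mean_path for reuse at inference time.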
    # Extract sEMG signals for the target gestures.
    gest = gestures(emg, label, targets=config.targets)
    # Perform the train/test split
    train_gestures, test_gestures = train_test_split(gest)
    # NOTE: Optional visualization that graphs class/gesture distributions
    # plot_distribution(train_gestures)
    # plot_distribution(test_gestures)
    # Slide a window over the training sEMG data to build samples.
    X_train, y_train = apply_window(train_gestures, window=config.window, step=config.step)
    # Slide a window over the test sEMG data to build samples.
    X_test, y_test = apply_window(test_gestures, window=config.window, step=config.step)
    # Reshape the windows into single-channel "signal images" and cast to float32.
    X_train = X_train.reshape(-1, 8, config.window, 1)
    X_test = X_test.reshape(-1, 8, config.window, 1)
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
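    # Resulting sample shape: (8 sensors, config.window timesteps, 1 channel),
    # so each window is treated as a single-channel 2-D image by the CNN.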
print("Shape of Inputs:\n")
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
print("Data Type of Inputs\n")
print(X_train.dtype)
print(X_test.dtype)
print("\n")
    # Build the TensorFlow model
    cnn = get_model(
        num_classes=config.num_classes,
        filters=config.filters,
        neurons=config.neurons,
        dropout=config.dropout,
        kernel_size=config.k_size,
        input_shape=config.in_shape,
        pool_size=config.p_kernel
    )
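    # Optional (not in the original): print a layer-by-layer summary to
    # sanity-check the architecture before training.
    # cnn.summary()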
    # Start training (weights are saved along the way)
    history = train_model(
        cnn, X_train, y_train, X_test, y_test,
        config.batch_size, save_path=config.save_path, epochs=config.epochs,
        patience=config.patience, lr=config.inital_lr
    )
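    # train_model is assumed (from its arguments) to compile the model, apply
    # early stopping with the given patience, and checkpoint weights to
    # config.save_path during training.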
    # # Visualize accuracy and loss logs
    # plot_logs(history, acc=True, save_path=config.acc_log)
    # plot_logs(history, acc=False, save_path=config.loss_log)
    # # Load the pretrained model (architecture must match the saved weights)
    # model = get_model(
    #     num_classes=config.num_classes,
    #     filters=config.filters,
    #     neurons=config.neurons,
    #     dropout=config.dropout,
    #     kernel_size=config.k_size,
    #     input_shape=config.in_shape,
    #     pool_size=config.p_kernel
    # )
    # model.load_weights(config.save_path)
    # # NOTE: Optional test of the loaded model's performance
    # model.compile(
    #     optimizer=tf.keras.optimizers.Adam(learning_rate=0.2),
    #     loss='sparse_categorical_crossentropy',
    #     metrics=['accuracy'],
    # )
    # # Check that the loaded weights behave the same
    # model.evaluate(X_test, y_test)
    # # Test the finetune model (last classifier block removed from the base model)
    # finetune_model = get_finetune(config.save_path, config.prev_params, num_classes=config.num_classes)
    # print("finetune model loaded!")
    # # NOTE: You can build the finetune model from a loaded base model too:
    # # finetune_model = create_finetune(model, num_classes=4)
    # finetune_model.compile(
    #     optimizer=tf.keras.optimizers.Adam(learning_rate=0.2),
    #     loss='sparse_categorical_crossentropy',
    #     metrics=['accuracy'],
    # )
    # finetune_model.evaluate(X_test, y_test)
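    # get_finetune / create_finetune are assumed to replace the base model's
    # final classifier block with a fresh head of `num_classes` outputs, so
    # evaluating against y_test only makes sense if the label set matches.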