my_mda.py
import numpy as np
def perform_MDA(dataset, flattened, subjects, types):
    """Project `flattened` onto the leading MDA (multiple discriminant
    analysis) directions.

    `flattened` has shape (subjects * types, features), with rows grouped by
    class: the `types` consecutive samples starting at i * types belong to
    subject i. `dataset` selects a dataset-specific regularization constant.
    """
    noise = {'pose': 0.93, 'face': 0.999}
    print('Performing MDA, please wait....')
    mu_class = []   # empirical mean of each class
    cov_class = []  # empirical covariance of each class
    mu_not = np.zeros(shape=(flattened.shape[1]))
    sigma_b = np.zeros(shape=(flattened.shape[1], flattened.shape[1]))
    sigma_w = np.zeros(shape=(flattened.shape[1], flattened.shape[1]))
    prob = 1 / subjects  # equal class priors
    for i in range(subjects):
        # slice out the samples belonging to class i
        start = i * types
        end = (i + 1) * types
        temp = flattened[start:end]
        mean = np.mean(temp, axis=0)
        mu_class.append(mean)
        cov = np.zeros(shape=(temp.shape[1], temp.shape[1]))
        for k in range(types):
            mat = (temp[k] - mean).reshape(temp.shape[1], 1)
            cov = cov + np.dot(mat, mat.T)
        # average the outer products to get the class covariance
        cov = cov / types
        # regularize with dataset-specific noise so the matrix stays invertible
        cov += noise.get(dataset, 0.8) * np.identity(cov.shape[0])
        cov_class.append(cov)
        # accumulate the overall empirical mean mu_not
        mu_not += prob * mean
        # accumulate the within-class scatter sigma_w
        sigma_w += prob * cov
    # calculate the between-class scatter sigma_b
    for i in range(subjects):
        mat = (mu_class[i] - mu_not).reshape(flattened.shape[1], 1)
        sigma_b += prob * np.dot(mat, mat.T)
    # regularize sigma_b with the dataset-specific constant
    # (0.747 for pose, 0.945 for face in the original tuning)
    reg = {'pose': 0.747, 'face': 0.945}
    b = sigma_b + reg.get(dataset, 0.945) * np.identity(sigma_b.shape[0])
    # MDA reduces to the eigenproblem of a = sigma_w^{-1} b
    a = np.dot(np.linalg.inv(sigma_w), b)
    # find the eigenvalues and eigenvectors; they are real in theory because
    # sigma_w and b are symmetric, so drop any numerical imaginary residue
    val, vec = np.linalg.eig(a)
    val, vec = val.real, vec.real
    # sort the eigenvalues and corresponding vectors in descending order
    idx = val.argsort()[::-1]
    val_ = val[idx]
    vec_ = vec[:, idx]
    # keep the leading dimensions (at most subjects - 1 are discriminative)
    dim = 60
    final_vectors = vec_[:, :dim]
    # project the data onto the retained directions
    projection = np.dot(flattened, final_vectors)
    print(final_vectors)
    print('-----------------------------------------')
    print(val_[:dim])
    print('-----------------------------------------')
    return projection
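

# Hypothetical usage sketch, not part of the original file: it exercises
# perform_MDA on synthetic data to show the expected call signature and
# shapes. The subject/type/feature counts below are made up; substitute the
# real flattened image matrix for your dataset.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    subjects, types, features = 10, 5, 100
    # rows grouped by subject: samples [i * types:(i + 1) * types] are class i
    flattened = rng.normal(size=(subjects * types, features))
    projection = perform_MDA('pose', flattened, subjects, types)
    print(projection.shape)  # (subjects * types, dim) == (50, 60)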