loss.py

import numpy as np

from layers import Softmax
from utils import make_one_hot_target


class SquaredLoss:
    """Elementwise squared error: J = 0.5 * (target - y)**2."""

    def calc_loss(self, y, target):
        J = 0.5 * (target - y) ** 2
        return J

    def calc_gradient(self, y, target):
        dJdy = y - target
        return dJdy


class NegLogLikelihoodLoss:
    """Negative log-likelihood of the predicted probabilities y."""

    def calc_loss(self, y, target):
        # Clamp y away from zero so np.log never sees 0.
        J = -target * np.log(np.maximum(y, 1e-7))
        return J

    def calc_gradient(self, y, target):
        return -target / np.maximum(y, 1e-7)


class CrossEntropyLoss:
    """This is the combination of a final SoftmaxLayer and a Negative Log-Likelihood loss function."""

    def calc_loss(self, x, one_hot_target):
        # Numerically stable softmax: subtract the max logit before exponentiating.
        c = np.max(x)
        exp_x = np.exp(x - c)
        self.p = exp_x / np.sum(exp_x)
        J = -one_hot_target * np.log(self.p)
        return J

    def calc_gradient(self, y, one_hot_target):
        # Gradient of softmax + NLL with respect to the logits.
        return self.p - one_hot_target

    @staticmethod
    def test_score(model, test_set):
        # Classification accuracy (in percent) of the model on test_set.
        test_err = 0.
        for x, target in test_set:
            y = model.forward(x)
            y = Softmax().forward(y)
            if np.argmax(y) != np.argmax(target):
                test_err += 1.
        test_score = (1.0 - test_err / float(len(test_set))) * 100.0
        return test_score
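

# Illustrative sketch (not part of the original file): a finite-difference check of
# CrossEntropyLoss.calc_gradient, assuming 1-D logits and a one-hot target of the
# same length. It verifies that the analytic gradient p - target matches the
# numerical gradient of the summed loss.
def _check_cross_entropy_gradient(eps=1e-5):
    rng = np.random.default_rng(0)
    x = rng.normal(size=5)                       # toy logits
    t = np.zeros(5)
    t[2] = 1.0                                   # class 2 is the target
    loss = CrossEntropyLoss()
    loss.calc_loss(x, t)                         # caches self.p for the gradient
    analytic = loss.calc_gradient(x, t)          # p - target
    numeric = np.zeros_like(x)
    for i in range(x.size):
        x_plus, x_minus = x.copy(), x.copy()
        x_plus[i] += eps
        x_minus[i] -= eps
        numeric[i] = (np.sum(loss.calc_loss(x_plus, t))
                      - np.sum(loss.calc_loss(x_minus, t))) / (2 * eps)
    return np.max(np.abs(analytic - numeric))    # should be tiny (~1e-9)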


NLL = NegLogLikelihoodLoss


class ClaudioMaxNLL:
    """Loss equal to minus the sum-normalised score of the target class (no log, no softmax)."""

    def calc_loss(self, y, target_class):
        one_hot_target = make_one_hot_target(y.size, target_class)
        self.y = y
        self.s = np.sum(y)
        J = -one_hot_target * (y / self.s)
        return J

    def calc_gradient(self, last_y, target_class):
        # Build the Jacobian of -(y / sum(y)); the gradient of the loss is its
        # target-class row.
        s = self.s
        y = self.y
        z = y / s ** 2
        diag = -((1 / s) - z)
        m = np.zeros((y.size, y.size))
        for i in range(y.size):
            for j in range(y.size):
                if i != j:
                    m[i][j] = z[i]
                else:
                    m[i][j] = diag[i]
        target_class = int(target_class)
        return m[target_class]
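

if __name__ == "__main__":
    # Minimal usage sketch added for illustration (not part of the original file):
    # run the two main losses on a toy score vector with a hand-built one-hot target.
    scores = np.array([2.0, 1.0, 0.1])
    target = np.zeros_like(scores)
    target[0] = 1.0                              # class 0 is correct

    ce = CrossEntropyLoss()
    print("cross-entropy loss:", np.sum(ce.calc_loss(scores, target)))
    print("gradient (p - target):", ce.calc_gradient(scores, target))

    sq = SquaredLoss()
    print("squared loss:", np.sum(sq.calc_loss(scores, target)))
    print("squared-loss gradient:", sq.calc_gradient(scores, target))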