BP.cpp
#include"BP.h"
#include"Matrix.h"
#include"Layer.h"
#include"FullyConnected.h"
#include"ReLU.h"
#include"SoftmaxCrossEntropy.h"
#include<vector>
#include<iostream>
#include<string>
#include<glog/logging.h>
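// BPnet: a plain multilayer perceptron assembled from fully connected layers
// with a ReLU activation after each hidden layer, trained against the loss in
// SoftmaxCrossEntropy.h (see BPnet::train below). Layers are held as shared
// pointers, so copying a BPnet shares the underlying layer objects.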
BPnet::BPnet()
{
}
BPnet::BPnet(std::vector<int>& FC_nums, const char* i_mthd, OPTIMIZER opt)
{
	optimizer = opt;
	int layer_num = static_cast<int>(FC_nums.size());
	// Each hidden size contributes an FC layer plus a ReLU, except the last,
	// which is the output layer and gets no activation: 2*layer_num - 1 layers.
	layers.reserve(2 * layer_num);
	for (int i{0}; i < layer_num - 1; i++)
	{
		layers.push_back(std::make_shared<FC_Layer>(FC_nums[i], i_mthd));
		layers.push_back(std::make_shared<ReLU_Layer>());
	}
	layers.push_back(std::make_shared<FC_Layer>(FC_nums[layer_num - 1], i_mthd));
	init_method = std::string(i_mthd);
}
// Takes const& (the matching declaration in BP.h must do the same) so that
// const and temporary networks can be copied. Note the copy is shallow: both
// nets end up sharing the same layer objects through the shared pointers.
BPnet::BPnet(const BPnet& net)
{
	layers = net.layers;
	LossFunc = net.LossFunc;
	optimizer = net.optimizer;
	init_method = net.init_method;
}
BPnet::BPnet(BPnet&& net)
{
	layers = std::move(net.layers);
	LossFunc = net.LossFunc;
	optimizer = net.optimizer;
	init_method = std::move(net.init_method); // steal the string instead of copying it
}
BPnet& BPnet::operator=(BPnet net)
{
	// Copy-and-swap style: `net` is already a copy (or a move) of the argument,
	// so its parts can be moved into *this without touching the caller's object.
	layers = std::move(net.layers);
	LossFunc = net.LossFunc;
	optimizer = net.optimizer;
	init_method = std::move(net.init_method);
	return *this;
}
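// Feed the input through every layer in order; each layer consumes the
// previous layer's output, and the final matrix is the network's raw scores.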
MATRIX BPnet::forward(MATRIX mat)
{
for (auto& layer:layers)
{
mat = layer->forward(mat);
}
return mat;
}
MATRIX BPnet::backward(MATRIX grad_pre, OPTIMIZER opt)
{
	MATRIX grad{std::move(grad_pre)};
	// Walk the layers in reverse, letting each one transform the upstream
	// gradient and accumulate its own parameter gradients.
	for (auto it = layers.rbegin(); it != layers.rend(); ++it)
	{
		grad = (*it)->backward(grad, opt);
	}
	return grad;
}
void BPnet::update()
{
	// Apply each layer's accumulated parameter update.
	for (auto& el : layers)
	{
		el->update();
	}
}
double BPnet::train(MATRIX& in_mat, MATRIX& label)
{
	// forward pass: run the batch through the network, then score it against the labels
	MATRIX scores = forward(in_mat);
	MATRIX Loss = LossFunc.forward(scores, label);
	// backward pass: start from the loss gradient and propagate it through every layer
	MATRIX grad = LossFunc.backward();
	backward(grad, optimizer);
	// apply the parameter updates accumulated during the backward pass
	update();
	return Loss.ToScalar();
}
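// A minimal usage sketch, compiled out by default since this file defines no
// main of its own. The initializer string "xavier" and the enumerator
// OPTIMIZER::SGD are assumptions: this file never names the valid i_mthd
// strings or OPTIMIZER values, so substitute whatever BP.h actually defines.
// MATRIX is assumed to be default-constructible and filled elsewhere.
#ifdef BP_USAGE_EXAMPLE
int main()
{
	// a 3-layer net: 64- and 32-unit hidden layers with ReLU, 10 output units
	std::vector<int> FC_nums{64, 32, 10};
	BPnet net(FC_nums, "xavier", OPTIMIZER::SGD); // assumed init string / enumerator
	MATRIX batch;  // fill with a mini-batch of inputs (MATRIX API not shown here)
	MATRIX labels; // fill with the matching one-hot labels
	double loss = net.train(batch, labels);
	std::cout << "batch loss: " << loss << std::endl;
	return 0;
}
#endif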