gru.rs
//! Let's test whether we can build a basic GRU neural network forward prop
//! without infinitely expanding matrix bounds.
//!
//! Because aljabar uses column vectors, we will use them.
use aljabar::{Matrix, Vector};
/// A single fully connected recurrent layer:
/// `activation(hidden_matrix * h + input_matrix * x + biases)`.
#[derive(Clone, Debug)]
struct NNet<F, const I: usize, const O: usize> {
    /// Recurrent weights applied to the previous hidden state (O x O).
    hidden_matrix: Matrix<f32, {O}, {O}>,
    /// Weights applied to the current input (O x I).
    input_matrix: Matrix<f32, {O}, {I}>,
    /// Per-output bias terms.
    biases: Vector<f32, {O}>,
    /// Scalar activation applied to each output component.
    activation: F,
}
impl<F, const I: usize, const O: usize> NNet<F, {I}, {O}>
where
    F: Fn(f32) -> f32,
{
    /// One forward step: `activation(hidden_matrix * h + input_matrix * x + biases)`.
    pub fn forward(&self, hidden: &Vector<f32, {O}>, input: &Vector<f32, {I}>) -> Vector<f32, {O}> {
        // The affine part; `.into()` moves the resulting vector into a plain
        // array so the activation can be applied in place.
        let mut out: [f32; {O}] = (self.hidden_matrix * hidden.clone()
            + self.input_matrix * input.clone()
            + self.biases)
            .into();
        for n in out.iter_mut() {
            *n = (self.activation)(*n);
        }
        out.into()
    }
}
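
// A minimal sketch (not part of the original file) of how a full GRU cell
// could be composed from three of these gate layers: an update gate `z`, a
// reset gate `r`, and a candidate state. `GruCell` and its `forward` are
// illustrative assumptions; they rely only on `NNet::forward` above and on
// the `Vector<f32, N>` <-> `[f32; N]` conversions the original code already
// uses.
#[derive(Clone, Debug)]
struct GruCell<F, G, const I: usize, const O: usize> {
    /// Update gate: z = sigmoid(U_z * h + W_z * x + b_z).
    update_gate: NNet<F, {I}, {O}>,
    /// Reset gate: r = sigmoid(U_r * h + W_r * x + b_r).
    reset_gate: NNet<F, {I}, {O}>,
    /// Candidate state: h_cand = tanh(U * (r . h) + W * x + b).
    candidate: NNet<G, {I}, {O}>,
}
impl<F, G, const I: usize, const O: usize> GruCell<F, G, {I}, {O}>
where
    F: Fn(f32) -> f32,
    G: Fn(f32) -> f32,
{
    pub fn forward(&self, hidden: &Vector<f32, {O}>, input: &Vector<f32, {I}>) -> Vector<f32, {O}> {
        let z: [f32; {O}] = self.update_gate.forward(hidden, input).into();
        let r: [f32; {O}] = self.reset_gate.forward(hidden, input).into();
        // Gate the previous hidden state elementwise with `r` before forming
        // the candidate.
        let mut gated: [f32; {O}] = hidden.clone().into();
        for (g, ri) in gated.iter_mut().zip(r.iter()) {
            *g *= *ri;
        }
        let candidate: [f32; {O}] = self.candidate.forward(&gated.into(), input).into();
        // Blend old state and candidate: h' = (1 - z) . h + z . h_cand.
        let mut out: [f32; {O}] = hidden.clone().into();
        for ((h, zi), ci) in out.iter_mut().zip(z.iter()).zip(candidate.iter()) {
            *h = (1.0 - *zi) * *h + *zi * *ci;
        }
        out.into()
    }
}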
#[cfg(test)]
#[test]
fn test() {
    use aljabar::{mat2x2, vec2};
    let nnet: NNet<_, 2, 2> = NNet {
        hidden_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
        input_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
        biases: vec2(-0.5, 0.5),
        // Logistic sigmoid activation.
        activation: |n: f32| (1.0 + (-n).exp()).recip(),
    };
    // With every weight at 0.5: hidden_matrix * h + input_matrix * x = [0.65, 0.65],
    // plus the biases gives [0.15, 1.15], so the output should be roughly
    // [sigmoid(0.15), sigmoid(1.15)] = [0.537, 0.760].
    // Note `forward` takes its arguments by reference.
    dbg!(nnet.forward(&vec2(0.2, 0.1), &vec2(1.0, 0.0)));
}
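
// A hedged companion test for the `GruCell` sketch above. The weights here
// are illustrative assumptions in the same spirit as the test above, not
// values from the original file.
#[cfg(test)]
#[test]
fn test_gru_cell() {
    use aljabar::{mat2x2, vec2};
    // A non-capturing closure is `Copy`, so the same sigmoid can serve both gates.
    let sigmoid = |n: f32| (1.0 + (-n).exp()).recip();
    let cell: GruCell<_, _, 2, 2> = GruCell {
        update_gate: NNet {
            hidden_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            input_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            biases: vec2(0.0, 0.0),
            activation: sigmoid,
        },
        reset_gate: NNet {
            hidden_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            input_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            biases: vec2(0.0, 0.0),
            activation: sigmoid,
        },
        candidate: NNet {
            hidden_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            input_matrix: mat2x2(0.5, 0.5, 0.5, 0.5),
            biases: vec2(0.0, 0.0),
            activation: |n: f32| n.tanh(),
        },
    };
    // One recurrent step; the update gate interpolates between the previous
    // hidden state and the tanh candidate.
    dbg!(cell.forward(&vec2(0.2, 0.1), &vec2(1.0, 0.0)));
}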