Skip to content

Commit

Permalink
Merge pull request #136 from es-ude/127-implementation-for-fully-connected-layer
Browse files Browse the repository at this point in the history

Implementation for linear layer
  • Loading branch information
julianhoever authored Aug 26, 2022
2 parents 3146b3b + c1750eb commit 3b6969e
Show file tree
Hide file tree
Showing 34 changed files with 1,321 additions and 633 deletions.
4 changes: 3 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,13 +15,15 @@ The project is part of the elastic ai ecosystem developed by the Embedded System

- [Users Guide](#users-guide)
- [Install](#install)
- [Structure of the Project](#structure-of-the-project)
- [General Limitations](#general-limitations)
- [Developers Guide](#developers-guide)
- [Install Dev Dependencies](#install-dev-dependencies)


## Users Guide

#### Install
### Install
You can install the ElasticAI.creator as a dependency using pip:
```bash
python3 -m pip install "elasticai.creator"
Expand Down
53 changes: 0 additions & 53 deletions elasticai/creator/examples/translate_lstm.py

This file was deleted.

65 changes: 65 additions & 0 deletions elasticai/creator/examples/translate_lstm_linear_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
import argparse
from pathlib import Path

import torch

from elasticai.creator.vhdl.number_representations import FixedPoint
from elasticai.creator.vhdl.translator.abstract.layers import (
Linear1dTranslationArgs,
LSTMTranslationArgs,
)
from elasticai.creator.vhdl.translator.pytorch import translator
from elasticai.creator.vhdl.translator.pytorch.build_function_mappings import (
DEFAULT_BUILD_FUNCTION_MAPPING,
)


def read_commandline_args() -> argparse.Namespace:
    """Parse the command line.

    Returns:
        Namespace with a single ``build_dir`` attribute (a ``Path``),
        which is a required argument.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--build_dir", required=True, type=Path)
    parsed = arg_parser.parse_args()
    return parsed


class LSTMModel(torch.nn.Module):
    """Single-layer LSTM followed by a linear layer mapping the 10-dim hidden
    state of every time step to one output feature."""

    def __init__(self) -> None:
        super().__init__()
        self.lstm = torch.nn.LSTM(input_size=1, hidden_size=10)
        self.linear = torch.nn.Linear(in_features=10, out_features=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # nn.LSTM returns (output, (h_n, c_n)); only the output sequence is
        # passed through the linear layer, so a single tensor is returned.
        # (The previous annotation tuple[Tensor, Tensor] was incorrect.)
        return self.linear(self.lstm(x)[0])


def main() -> None:
    """Translate the LSTM+linear example model to VHDL and save the
    generated sources below the directory given via ``--build_dir``."""
    cli_args = read_commandline_args()
    model = LSTMModel()

    to_fixed_point = FixedPoint.get_factory(total_bits=8, frac_bits=4)
    library = "xil_defaultlib"
    # Per-layer translation arguments, keyed by translatable class name.
    args_per_layer = {
        "LSTMTranslatable": LSTMTranslationArgs(
            fixed_point_factory=to_fixed_point,
            sigmoid_resolution=(-2.5, 2.5, 256),
            tanh_resolution=(-1, 1, 256),
            work_library_name=library,
        ),
        "Linear1dTranslatable": Linear1dTranslationArgs(
            fixed_point_factory=to_fixed_point,
            work_library_name=library,
        ),
    }

    layers = translator.translate_model(
        model=model, build_function_mapping=DEFAULT_BUILD_FUNCTION_MAPPING
    )
    code = translator.generate_code(
        translatable_layers=layers, translation_args=args_per_layer
    )
    translator.save_code(code_repr=code, path=cli_args.build_dir)


if __name__ == "__main__":
main()
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import unittest

from elasticai.creator.vhdl.components import Linear1dComponent
from elasticai.creator.vhdl.number_representations import FixedPoint


class Linear1dComponentTest(unittest.TestCase):
    """Unit tests for the bit widths Linear1dComponent derives from its
    configuration and for its out_features restriction."""

    def setUp(self) -> None:
        fp_factory = FixedPoint.get_factory(total_bits=16, frac_bits=8)
        self.component = Linear1dComponent(
            in_features=20,
            out_features=1,
            fixed_point_factory=fp_factory,
            work_library_name="work",
        )

    def test_derives_correct_data_width(self) -> None:
        # matches total_bits of the fixed point factory
        self.assertEqual(self.component.data_width, 16)

    def test_calculates_correct_addr_width(self) -> None:
        # 20 input features are addressable with 5 bits
        self.assertEqual(self.component.addr_width, 5)

    def test_out_features_larger_1_raises_not_implemented_error(self) -> None:
        fp_factory = FixedPoint.get_factory(total_bits=8, frac_bits=4)
        with self.assertRaises(NotImplementedError):
            Linear1dComponent(
                in_features=3,
                out_features=2,
                fixed_point_factory=fp_factory,
            )
27 changes: 27 additions & 0 deletions elasticai/creator/tests/vhdl/components/test_lstm_component.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import unittest

from elasticai.creator.vhdl.components import LSTMComponent
from elasticai.creator.vhdl.number_representations import FixedPoint


class LSTMComponentTest(unittest.TestCase):
    """Unit tests for the bit widths LSTMComponent derives from its
    configuration.

    Consistency fix: uses (actual, expected) assertEqual argument order and
    ``-> None`` annotations on test methods, matching the sibling component
    test classes in this package.
    """

    def setUp(self) -> None:
        self.lstm = LSTMComponent(
            input_size=5,
            hidden_size=3,
            fixed_point_factory=FixedPoint.get_factory(total_bits=8, frac_bits=4),
            work_library_name="xil_defaultlib",
        )

    def test_fixed_point_params_correct_derived(self) -> None:
        # data/frac width mirror total_bits/frac_bits of the factory
        self.assertEqual(self.lstm.data_width, 8)
        self.assertEqual(self.lstm.frac_width, 4)

    def test_x_h_addr_width_correct_set(self) -> None:
        self.assertEqual(self.lstm.x_h_addr_width, 3)

    def test_hidden_addr_width_correct_set(self) -> None:
        self.assertEqual(self.lstm.hidden_addr_width, 3)

    def test_w_addr_width_correct_set(self) -> None:
        self.assertEqual(self.lstm.w_addr_width, 5)
26 changes: 26 additions & 0 deletions elasticai/creator/tests/vhdl/components/test_rom_component.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import unittest

from elasticai.creator.vhdl.components import RomComponent
from elasticai.creator.vhdl.number_representations import FixedPoint


class RomComponentTest(unittest.TestCase):
    """Unit tests for RomComponent's derived widths and zero padding."""

    def setUp(self) -> None:
        to_fixed_point = FixedPoint.get_factory(total_bits=16, frac_bits=8)
        self.rom = RomComponent(
            rom_name="test_rom",
            values=[to_fixed_point(value) for value in range(20)],
            resource_option="auto",
        )

    def test_data_width_correct_derived(self) -> None:
        # matches total_bits of the fixed point values
        self.assertEqual(self.rom.data_width, 16)

    def test_addr_width_correct_calculated(self) -> None:
        # 20 values fit into 2**5 addresses
        self.assertEqual(self.rom.addr_width, 5)

    def test_correct_number_of_values(self) -> None:
        # values are padded to the next power of two (32)
        self.assertEqual(len(self.rom.hex_values), 32)

    def test_values_correct_padded(self) -> None:
        # the 12 padding entries are zero words
        self.assertEqual(self.rom.hex_values[20:], ['x"0000"'] * 12)
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import unittest

from elasticai.creator.vhdl.components import (
Linear1dComponent,
LSTMCommonComponent,
RomComponent,
)
from elasticai.creator.vhdl.number_representations import FixedPoint
from elasticai.creator.vhdl.translator.abstract.layers import (
Linear1dTranslatable,
Linear1dTranslationArgs,
)


class Linear1dTranslatableTest(unittest.TestCase):
    """Checks that translating a Linear1dTranslatable produces every
    required VHDL component with the expected file name."""

    def setUp(self) -> None:
        self.linear = Linear1dTranslatable(weight=[[1, 2, 3]], bias=[1])
        self.translation_args = Linear1dTranslationArgs(
            fixed_point_factory=FixedPoint.get_factory(total_bits=8, frac_bits=4)
        )

    def test_contains_all_needed_components(self) -> None:
        components = self.linear.translate(self.translation_args)

        expected = [
            (Linear1dComponent, "linear_1d.vhd"),
            (RomComponent, "w_rom.vhd"),
            (RomComponent, "b_rom.vhd"),
            (LSTMCommonComponent, "lstm_common.vhd"),
        ]
        produced = [
            (type(component), component.file_name) for component in components
        ]

        self.assertEqual(produced, expected)
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from functools import partial
from unittest import TestCase

from elasticai.creator.vhdl.components.dual_port_2_clock_ram_component import (
Expand All @@ -12,23 +11,23 @@
from elasticai.creator.vhdl.components.sigmoid_component import SigmoidComponent
from elasticai.creator.vhdl.components.tanh_component import TanhComponent
from elasticai.creator.vhdl.number_representations import FixedPoint
from elasticai.creator.vhdl.translator.abstract.layers.abstract_lstm import (
AbstractLSTM,
LSTMTranslationArguments,
from elasticai.creator.vhdl.translator.abstract.layers.lstm_translatable import (
LSTMTranslatable,
LSTMTranslationArgs,
)


class LSTMTest(TestCase):
class LSTMTranslatableTest(TestCase):
def setUp(self) -> None:
self.lstm = AbstractLSTM(
self.lstm = LSTMTranslatable(
weights_ih=[[[1, 2], [3, 4], [5, 6], [7, 8]]],
weights_hh=[[[1], [2], [3], [4]]],
biases_ih=[[1, 2, 3, 4]],
biases_hh=[[5, 6, 7, 8]],
)

self.translation_args = LSTMTranslationArguments(
fixed_point_factory=partial(FixedPoint, total_bits=8, frac_bits=2),
self.translation_args = LSTMTranslationArgs(
fixed_point_factory=FixedPoint.get_factory(total_bits=8, frac_bits=2),
sigmoid_resolution=(-2.5, 2.5, 256),
tanh_resolution=(-1, 1, 256),
)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import unittest

import torch

from elasticai.creator.vhdl.translator.pytorch.build_functions import build_linear_1d


def arange_parameter(
    start: int, end: int, shape: tuple[int, ...]
) -> torch.nn.Parameter:
    """Create a float32 parameter holding the values [start, end) reshaped
    to *shape* (``-1`` entries are inferred as usual)."""
    values = torch.arange(start, end, dtype=torch.float32)
    return torch.nn.Parameter(values.reshape(shape))


class Linear1dBuildFunctionTest(unittest.TestCase):
    """Checks that build_linear_1d copies weight and bias from a
    torch.nn.Linear into the translatable representation."""

    def setUp(self) -> None:
        self.linear = torch.nn.Linear(in_features=3, out_features=1)
        self.linear.weight = arange_parameter(start=1, end=4, shape=(1, -1))
        self.linear.bias = arange_parameter(start=1, end=2, shape=(-1,))

    def test_weights_and_bias_correct_set(self) -> None:
        translatable = build_linear_1d(self.linear)
        self.assertEqual(translatable.weight, [[1.0, 2.0, 3.0]])
        self.assertEqual(translatable.bias, [1.0])
Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,15 @@
import torch.nn

from elasticai.creator.vhdl.language import Code
from elasticai.creator.vhdl.translator.abstract.layers import AbstractLSTM
from elasticai.creator.vhdl.translator.abstract.layers import LSTMTranslatable
from elasticai.creator.vhdl.translator.build_function_mapping import (
BuildFunctionMapping,
)
from elasticai.creator.vhdl.translator.pytorch import translator
from elasticai.creator.vhdl.translator.pytorch.build_function_mappings import (
DEFAULT_BUILD_FUNCTION_MAPPING,
)
from elasticai.creator.vhdl.translator.pytorch.translator import CodeFile, Module
from elasticai.creator.vhdl.translator.pytorch.translator import CodeFile, CodeModule
from elasticai.creator.vhdl.vhdl_component import VHDLComponent, VHDLModule


Expand Down Expand Up @@ -48,7 +48,7 @@ def fake_build_function(module: torch.nn.Module) -> TranslatableMock:


def unpack_module_directories(
modules: Iterable[Module],
modules: Iterable[CodeModule],
) -> list[tuple[str, list[tuple[str, Code]]]]:
def unpack_code_file(code_file: CodeFile) -> tuple[str, Code]:
return code_file.file_name, list(code_file.code)
Expand Down Expand Up @@ -109,7 +109,7 @@ def __init__(self) -> None:
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lstm_2(self.lstm_1(x))

def extract_input_hidden_size(lstm: AbstractLSTM) -> tuple[int, int]:
def extract_input_hidden_size(lstm: LSTMTranslatable) -> tuple[int, int]:
hidden_size = len(lstm.weights_hh[0][0])
input_size = len(lstm.weights_ih[0][0])
return input_size, hidden_size
Expand Down
Loading

0 comments on commit 3b6969e

Please sign in to comment.