feat(translation): add support for single buffered module to sequential
BREAKING CHANGE
glencoe committed Mar 5, 2023
1 parent 4d83d1b commit 5402782
Showing 17 changed files with 231 additions and 95 deletions.
4 changes: 4 additions & 0 deletions elasticai/creator/hdl/design_base/network_blocks.py
@@ -44,3 +44,7 @@ def __init__(
_signals.x_address(calculate_address_width(x_width)),
]
self._port = Port(incoming=in_signals, outgoing=out_signals)

@property
def port(self) -> Port:
return self._port
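
The new `port` property exposes the block's `Port`, so code built on a `BufferedNetworkBlock` can look up signal widths instead of recomputing them. A minimal usage sketch, assuming the constructor keywords visible in the `FPLinear1d` change below; the values are illustrative, not taken from this commit:

```python
from elasticai.creator.hdl.design_base.network_blocks import BufferedNetworkBlock

# Sketch only: name and widths are illustrative.
block = BufferedNetworkBlock(name="example_block", x_width=16, y_width=16)

# The address widths are derived from x_width/y_width via calculate_address_width.
x_addr_width = block.port["x_address"].width
y_addr_width = block.port["y_address"].width
```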
18 changes: 11 additions & 7 deletions elasticai/creator/hdl/vhdl/designs/fp_linear_1d.py
@@ -1,31 +1,35 @@
from typing import Optional

from elasticai.creator.hdl.design_base.network_blocks import BufferedNetworkBlock
from elasticai.creator.hdl.translatable import Folder
from elasticai.creator.hdl.translatable import Path
from elasticai.creator.hdl.vhdl.code_generation.template import Template
from elasticai.creator.hdl.vhdl.number_representations import FixedPointConfig


class FPLinear1d(BufferedNetworkBlock):
def __init__(
self,
*,
in_feature_num: int,
out_feature_num: int,
fixed_point_config: FixedPointConfig,
total_bits: int,
frac_bits: int,
work_library_name: str = "work",
resource_option: str = "auto",
name: Optional[str] = None,
):
super().__init__(
name="fp_linear1d" if name is None else name,
x_width=fixed_point_config.total_bits,
y_width=fixed_point_config.total_bits,
x_width=total_bits,
y_width=total_bits,
)
self.in_feature_num = in_feature_num
self.out_feature_num = out_feature_num
self.work_library_name = work_library_name
self.resource_option = resource_option
self.frac_width = fixed_point_config.frac_bits
self.frac_width = frac_bits
self.data_width = total_bits
self.x_addr_width = self.port["x_address"].width
self.y_addr_width = self.port["y_address"].width

def _template_parameters(self) -> dict[str, str]:
return dict(
@@ -40,7 +44,7 @@ def _template_parameters(self) -> dict[str, str]:
)
)

def save_to(self, destination: Folder):
def save_to(self, destination: Path):
template = Template(base_name="fp_linear_1d")
template.update_parameters(
layer_name=self.name,
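
With this change `FPLinear1d` no longer takes a `FixedPointConfig` object; the bit widths are passed directly as integers, and `save_to` now expects a `Path` instead of a `Folder`. A construction sketch under the new signature (the argument values are illustrative):

```python
from elasticai.creator.hdl.vhdl.designs.fp_linear_1d import FPLinear1d

# Widths are now plain integers instead of a FixedPointConfig instance.
design = FPLinear1d(
    in_feature_num=3,
    out_feature_num=2,
    total_bits=16,
    frac_bits=8,
)
```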
7 changes: 7 additions & 0 deletions elasticai/creator/hdl/vhdl/designs/sequential.py
@@ -126,6 +126,13 @@ def _generate_connections(self) -> list[str]:
connections[self._qualified_signal_name(instance, "clock")] = "clock"
connections[self._qualified_signal_name(instance, "enable")] = last_done
last_y = self._qualified_signal_name(instance, "y")
if "y_address" in self.instances[instance].port:
connections[self._qualified_signal_name(instance, "y_address")] = (
last_x_address
)
if "done" in self.instances[instance].port:
last_done = self._qualified_signal_name(instance, "done")
last_x_address = self._qualified_signal_name(instance, "x_address")
connections["y"] = last_y
connections["x_address"] = last_x_address
connections["done"] = last_done
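
The two new `if` blocks are what make a single buffered module work inside `Sequential`: a buffered instance's `y_address` is fed from `last_x_address`, and `last_done` only advances past instances that actually expose a `done` signal. A hedged sketch of the resulting connection dictionary for one buffered instance — the instance name, the qualified-name format, and the top-level fallbacks for `last_done`/`last_x_address` are all assumptions, not taken from this diff:

```python
# Illustrative only: assumes one buffered instance named "fp_linear1d_0" and a
# "<instance>_<signal>" qualified-name scheme; neither is confirmed by this commit.
connections = {
    "fp_linear1d_0_clock": "clock",
    "fp_linear1d_0_enable": "enable",        # previous last_done (assumed top-level default)
    "fp_linear1d_0_y_address": "y_address",  # previous last_x_address (assumed top-level default)
    "y": "fp_linear1d_0_y",
    "x_address": "fp_linear1d_0_x_address",
    "done": "fp_linear1d_0_done",
}
```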
71 changes: 0 additions & 71 deletions elasticai/creator/nn/arithmetics.py
@@ -2,14 +2,6 @@

import torch

from elasticai.creator.nn.autograd_functions.fixed_point_quantization import (
FixedPointDequantFunction,
FixedPointQuantFunction,
)
from elasticai.creator.nn.two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)


class Arithmetics(Protocol):
def quantize(self, a: torch.Tensor) -> torch.Tensor:
@@ -32,66 +24,3 @@ def mul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:

def matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
...


class FloatArithmetics(Arithmetics):
def quantize(self, a: torch.Tensor) -> torch.Tensor:
return a

def clamp(self, a: torch.Tensor) -> torch.Tensor:
return a

def round(self, a: torch.Tensor) -> torch.Tensor:
return a

def add(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b

def sum(self, tensor: torch.Tensor, *tensors: torch.Tensor) -> torch.Tensor:
summed = tensor
for t in tensors:
summed += t
return summed

def mul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a * b

def matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.matmul(a, b)


class FixedPointArithmetics(Arithmetics):
def __init__(self, config: TwoComplementFixedPointConfig) -> None:
self.config = config

def quantize(self, a: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(a))

def clamp(self, a: torch.Tensor) -> torch.Tensor:
return torch.clamp(
a, min=self.config.minimum_as_rational, max=self.config.maximum_as_rational
)

def round(self, a: torch.Tensor) -> torch.Tensor:
def float_to_int(x: torch.Tensor) -> torch.Tensor:
return FixedPointQuantFunction.apply(x, self.config)

def int_to_fixed_point(x: torch.Tensor) -> torch.Tensor:
return FixedPointDequantFunction.apply(x, self.config)

return int_to_fixed_point(float_to_int(a))

def add(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.clamp(a + b)

def sum(self, tensor: torch.Tensor, *tensors: torch.Tensor) -> torch.Tensor:
summed = tensor
for t in tensors:
summed += t
return self.clamp(summed)

def mul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(a * b))

def matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(torch.matmul(a, b)))
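
With this file reduced to the `Arithmetics` protocol, the concrete implementations move into their own modules, so downstream imports change accordingly (the `linear.py` and `lstm.py` hunks below show the new paths):

```python
# Previously all of these classes were importable from elasticai.creator.nn.arithmetics.
# After this commit the protocol and the implementations live in separate modules:
from elasticai.creator.nn.arithmetics import Arithmetics
from elasticai.creator.nn.fixed_point_arithmetics import FixedPointArithmetics
from elasticai.creator.nn.float_arithmetics import FloatArithmetics
```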
(changed file name not shown in this view)
@@ -2,7 +2,7 @@

import torch

from elasticai.creator.nn.two_complement_fixed_point_config import (
from elasticai.creator.nn._two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)

47 changes: 47 additions & 0 deletions elasticai/creator/nn/fixed_point_arithmetics.py
@@ -0,0 +1,47 @@
import torch

from elasticai.creator.nn._two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)
from elasticai.creator.nn.arithmetics import Arithmetics
from elasticai.creator.nn.autograd_functions.fixed_point_quantization import (
FixedPointDequantFunction,
FixedPointQuantFunction,
)


class FixedPointArithmetics(Arithmetics):
def __init__(self, config: TwoComplementFixedPointConfig) -> None:
self.config = config

def quantize(self, a: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(a))

def clamp(self, a: torch.Tensor) -> torch.Tensor:
return torch.clamp(
a, min=self.config.minimum_as_rational, max=self.config.maximum_as_rational
)

def round(self, a: torch.Tensor) -> torch.Tensor:
def float_to_int(x: torch.Tensor) -> torch.Tensor:
return FixedPointQuantFunction.apply(x, self.config)

def int_to_fixed_point(x: torch.Tensor) -> torch.Tensor:
return FixedPointDequantFunction.apply(x, self.config)

return int_to_fixed_point(float_to_int(a))

def add(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.clamp(a + b)

def sum(self, tensor: torch.Tensor, *tensors: torch.Tensor) -> torch.Tensor:
summed = tensor
for t in tensors:
summed += t
return self.clamp(summed)

def mul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(a * b))

def matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return self.round(self.clamp(torch.matmul(a, b)))
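
A short usage sketch of the relocated `FixedPointArithmetics`; the `TwoComplementFixedPointConfig` keyword names follow the `linear.py` hunk below, and the numeric values are illustrative:

```python
import torch

from elasticai.creator.nn._two_complement_fixed_point_config import (
    TwoComplementFixedPointConfig,
)
from elasticai.creator.nn.fixed_point_arithmetics import FixedPointArithmetics

ops = FixedPointArithmetics(
    config=TwoComplementFixedPointConfig(total_bits=16, frac_bits=8)
)

a = torch.tensor([0.3, 1.7, -5.0])
q = ops.quantize(a)  # clamp to the representable range, then round onto the fixed-point grid
p = ops.mul(q, q)    # products are clamped and rounded again
```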
29 changes: 29 additions & 0 deletions elasticai/creator/nn/float_arithmetics.py
@@ -0,0 +1,29 @@
import torch

from elasticai.creator.nn.arithmetics import Arithmetics


class FloatArithmetics(Arithmetics):
def quantize(self, a: torch.Tensor) -> torch.Tensor:
return a

def clamp(self, a: torch.Tensor) -> torch.Tensor:
return a

def round(self, a: torch.Tensor) -> torch.Tensor:
return a

def add(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a + b

def sum(self, tensor: torch.Tensor, *tensors: torch.Tensor) -> torch.Tensor:
summed = tensor
for t in tensors:
summed += t
return summed

def mul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return a * b

def matmul(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
return torch.matmul(a, b)
28 changes: 24 additions & 4 deletions elasticai/creator/nn/linear.py
@@ -1,11 +1,13 @@
from dataclasses import dataclass
from typing import Any, cast

import torch

from elasticai.creator.nn.arithmetics import Arithmetics, FixedPointArithmetics
from elasticai.creator.nn.two_complement_fixed_point_config import (
from elasticai.creator.nn._two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)
from elasticai.creator.nn.arithmetics import Arithmetics
from elasticai.creator.nn.fixed_point_arithmetics import FixedPointArithmetics


class Linear(torch.nn.Linear):
@@ -34,23 +36,41 @@ def quantized_forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("The quantized_forward function is not implemented.")


@dataclass
class FixedPointConfig:
frac_bits: int
total_bits: int


class FixedPointLinear(Linear):
def __init__(
self,
in_features: int,
out_features: int,
config: TwoComplementFixedPointConfig,
config: FixedPointConfig,
bias: bool,
device: Any = None,
) -> None:
super().__init__(
in_features=in_features,
out_features=out_features,
arithmetics=FixedPointArithmetics(config=config),
arithmetics=FixedPointArithmetics(
config=TwoComplementFixedPointConfig(
total_bits=config.total_bits, frac_bits=config.frac_bits
)
),
bias=bias,
device=device,
)

@property
def total_bits(self) -> int:
return cast(FixedPointArithmetics, self.ops).config.total_bits

@property
def frac_bits(self) -> int:
return cast(FixedPointArithmetics, self.ops).config.frac_bits

@property
def fixed_point_factory(self) -> TwoComplementFixedPointConfig:
return cast(FixedPointArithmetics, self.ops).config
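
`FixedPointLinear` now takes the lightweight `FixedPointConfig` dataclass defined above and builds the `TwoComplementFixedPointConfig` internally, while the `total_bits`, `frac_bits`, and `fixed_point_factory` properties still read from the underlying arithmetics. A construction sketch with illustrative values:

```python
from elasticai.creator.nn.linear import FixedPointConfig, FixedPointLinear

layer = FixedPointLinear(
    in_features=3,
    out_features=2,
    config=FixedPointConfig(total_bits=16, frac_bits=8),
    bias=True,
)

layer.total_bits  # 16, read back from the underlying FixedPointArithmetics config
```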
8 changes: 4 additions & 4 deletions elasticai/creator/nn/lstm.py
@@ -4,13 +4,13 @@

import torch

from elasticai.creator.nn.arithmetics import FixedPointArithmetics
from elasticai.creator.nn._two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)
from elasticai.creator.nn.fixed_point_arithmetics import FixedPointArithmetics
from elasticai.creator.nn.hard_sigmoid import HardSigmoid
from elasticai.creator.nn.hard_tanh import HardTanh
from elasticai.creator.nn.lstm_cell import LSTMCell
from elasticai.creator.nn.two_complement_fixed_point_config import (
TwoComplementFixedPointConfig,
)


class LSTM(torch.nn.Module):
13 changes: 13 additions & 0 deletions elasticai/creator/translatable_modules/vhdl/fp_linear_1d.py
@@ -0,0 +1,13 @@
from elasticai.creator.hdl.translatable import Saveable
from elasticai.creator.hdl.vhdl.designs.fp_linear_1d import FPLinear1d as FPLinearDesign
from elasticai.creator.nn.linear import FixedPointLinear


class FPLinear1d(FixedPointLinear):
def translate(self) -> Saveable:
return FPLinearDesign(
frac_bits=self.frac_bits,
total_bits=self.total_bits,
in_feature_num=self.in_features,
out_feature_num=self.out_features,
)
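
The new translatable module connects the trainable layer to the hardware design: `translate()` forwards the layer's bit widths and feature counts to the `FPLinearDesign`, which can then be written out via its `save_to`. A hedged end-to-end sketch — the constructor values are illustrative and `destination` is a stand-in for a concrete `Path` implementation:

```python
from elasticai.creator.nn.linear import FixedPointConfig
from elasticai.creator.translatable_modules.vhdl.fp_linear_1d import FPLinear1d

# FPLinear1d inherits FixedPointLinear's constructor (see linear.py above); values are illustrative.
layer = FPLinear1d(
    in_features=3,
    out_features=2,
    config=FixedPointConfig(total_bits=16, frac_bits=8),
    bias=True,
)

design = layer.translate()   # returns a Saveable FPLinearDesign
design.save_to(destination)  # `destination`: some hdl.translatable.Path object (assumption)
```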
(remaining changed files not loaded in this view)
