
Commit

fix: fix broken imports and other errors that lead to failing tests
julianhoever committed Aug 26, 2023
1 parent 59cdaae commit 6eac561
Showing 16 changed files with 57 additions and 76 deletions.
2 changes: 1 addition & 1 deletion elasticai/creator/base_modules/math_operations.py
@@ -6,7 +6,7 @@

 class Quantize(Protocol):
     @abstractmethod
-    def quantize(self, x: Tensor) -> Tensor:
+    def quantize(self, a: Tensor) -> Tensor:
         ...
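For context on the renamed parameter: Quantize is a structural Protocol, so the signature above is the contract implementers must match, and the parameter name matters to type checkers when the method is called with keywords. A minimal sketch of an implementer, assuming only the import path shown in this diff (the FakeQuantize class itself is hypothetical, not part of this commit):

import torch
from torch import Tensor

from elasticai.creator.base_modules.math_operations import Quantize


class FakeQuantize(Quantize):
    # Hypothetical implementer: snaps values to a grid of 0.25 steps.
    def quantize(self, a: Tensor) -> Tensor:
        return torch.round(a * 4) / 4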
2 changes: 2 additions & 0 deletions elasticai/creator/nn/__init__.py
@@ -0,0 +1,2 @@
+from .identity import BufferedIdentity, BufferlessIdentity
+from .sequential import Sequential
12 changes: 6 additions & 6 deletions elasticai/creator/nn/fixed_point/_math_operations.py
@@ -1,15 +1,15 @@
 from typing import cast
 
 import torch
-from fixed_point._round_to_fixed_point import RoundToFixedPoint
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
 
-from elasticai.creator.base_modules.conv1d import MathOperations as Conv1dOps
-from elasticai.creator.base_modules.linear import MathOperations as LinearOps
-from elasticai.creator.base_modules.lstm_cell import MathOperations as LSTMOps
+from elasticai.creator.base_modules.math_operations import Add, MatMul, Quantize
+from elasticai.creator.nn.fixed_point._round_to_fixed_point import RoundToFixedPoint
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 
 
-class MathOperations(LinearOps, LSTMOps, Conv1dOps):
+class MathOperations(Quantize, Add, MatMul):
     def __init__(self, config: FixedPointConfig) -> None:
         self.config = config

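A short usage sketch of the rewritten class, assuming nothing beyond what this diff and the test changes below show (the tests construct FixedPointConfig(total_bits=4, frac_bits=2), which in two's complement represents [-2.0, 1.75] in steps of 0.25):

import torch

from elasticai.creator.nn.fixed_point._math_operations import MathOperations
from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
    FixedPointConfig,
)

ops = MathOperations(config=FixedPointConfig(total_bits=4, frac_bits=2))

# Out-of-range values clamp to the representable range, matching
# test_quantize_clamps_minus5_to_minus2 below.
assert ops.quantize(torch.tensor([-5.0])).item() == -2.0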
5 changes: 4 additions & 1 deletion elasticai/creator/nn/fixed_point/_round_to_fixed_point.py
@@ -1,7 +1,10 @@
 from typing import Any
 
 import torch
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
+
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 
 
 class RoundToFixedPoint(torch.autograd.Function):
10 changes: 6 additions & 4 deletions elasticai/creator/nn/fixed_point/conv1d/layer.py
@@ -2,10 +2,12 @@
 
 import torch
 import torch.nn
-from fixed_point._math_operations import Operations
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
 
 from elasticai.creator.base_modules.conv1d import Conv1d
+from elasticai.creator.nn.fixed_point._math_operations import MathOperations
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from elasticai.creator.vhdl.translatable import Translatable
 
 from .design import FPConv1d as FPConv1dDesign
@@ -28,7 +30,7 @@ def __init__(
         self._config = FixedPointConfig(total_bits=total_bits, frac_bits=frac_bits)
         self._signal_length = signal_length
         super().__init__(
-            arithmetics=Operations(config=self._config),
+            arithmetics=MathOperations(config=self._config),
             in_channels=in_channels,
             out_channels=out_channels,
             kernel_size=kernel_size,
@@ -102,7 +104,7 @@ def __init__(
     ) -> None:
         super().__init__()
         self._config = FixedPointConfig(total_bits=total_bits, frac_bits=frac_bits)
-        self._arithmetics = Operations(config=self._config)
+        self._arithmetics = MathOperations(config=self._config)
         self._signal_length = signal_length
         self._conv1d = Conv1d(
             arithmetics=self._arithmetics,
5 changes: 3 additions & 2 deletions elasticai/creator/nn/fixed_point/hard_sigmoid/layer.py
@@ -1,6 +1,7 @@
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
-
 from elasticai.creator.base_modules.hard_sigmoid import HardSigmoid
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from elasticai.creator.vhdl.design.design import Design
 from elasticai.creator.vhdl.translatable import Translatable

5 changes: 3 additions & 2 deletions elasticai/creator/nn/fixed_point/hard_tanh/layer.py
@@ -1,6 +1,7 @@
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
-
 from elasticai.creator.base_modules.hard_tanh import HardTanh
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from elasticai.creator.vhdl.design.design import Design
 from elasticai.creator.vhdl.translatable import Translatable

@@ -1,12 +1,14 @@
 from typing import cast
 
 import torch
-from fixed_point._math_operations import Operations
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
 
 from elasticai.creator.base_modules.autograd_functions.identity_step_function import (
     IdentityStepFunction,
 )
+from elasticai.creator.nn.fixed_point._math_operations import MathOperations
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from elasticai.creator.vhdl.design.design import Design
 from elasticai.creator.vhdl.shared_designs.precomputed_scalar_function import (
     PrecomputedScalarFunction,
@@ -26,7 +28,7 @@ def __init__(
         super().__init__()
         self._base_module = base_module
         self._config = FixedPointConfig(total_bits=total_bits, frac_bits=frac_bits)
-        self._arithmetics = Operations(self._config)
+        self._arithmetics = MathOperations(self._config)
         self._step_lut = torch.linspace(*sampling_intervall, num_steps)
 
     def forward(self, inputs: torch.Tensor) -> torch.Tensor:
41 changes: 5 additions & 36 deletions tests/base_modules/arithmetics/test_fixed_point_arithmetics.py
@@ -1,14 +1,16 @@
 import torch
-from fixed_point._math_operations import Operations
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
 
+from elasticai.creator.nn.fixed_point._math_operations import MathOperations
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from tests.tensor_test_case import TensorTestCase
 
 
 class FixedPointArithmeticsTest(TensorTestCase):
     def setUp(self) -> None:
         self.config: FixedPointConfig = FixedPointConfig(total_bits=4, frac_bits=2)
-        self.arithmetics = Operations(config=self.config)
+        self.arithmetics = MathOperations(config=self.config)
 
     def test_quantize_clamps_minus5_to_minus2(self) -> None:
         a = torch.tensor([-5.0])
@@ -48,42 +50,9 @@ def test_add(self) -> None:
         expected = [-1.75, 1.5, 1.75]
         self.assertTensorEqual(expected, actual)
 
-    def test_sum_over_all_values(self) -> None:
-        a = torch.tensor([[-0.25, 0.5, 1.0], [-1.5, 1.0, 1.5], [-0.5, -1.0, -1.0]])
-        actual = self.arithmetics.sum(a)
-        expected = torch.tensor(-0.25)
-        self.assertTensorEqual(expected, actual)
-
-    def test_sum_on_first_dim(self) -> None:
-        a = torch.tensor([[-0.25, 0.5, 1.0], [-1.5, 1.0, 1.5], [-0.5, -1.0, -1.0]])
-        actual = self.arithmetics.sum(a, dim=0)
-        expected = [-2.0, 0.5, 1.5]
-        self.assertTensorEqual(expected, actual)
-
-    def test_mul(self) -> None:
-        a = torch.tensor([-0.5, 1.5, 0.5])
-        b = torch.tensor([0.5, 1.5, 1.25])
-        actual = self.arithmetics.mul(a, b)
-        expected = [-0.25, 1.75, 0.5]
-        self.assertTensorEqual(expected, actual)
-
-    def test_matmul(self) -> None:
-        a = torch.tensor([[-2.0, -1.75, -1.5], [-0.25, 0.0, 0.25], [1.25, 1.5, 1.75]])
-        b = torch.tensor([[-0.25], [0.5], [0.25]])
-        actual = self.arithmetics.matmul(a, b)
-        expected = [[-0.75], [0.0], [0.75]]
-        self.assertTensorEqual(expected, actual)
-
-    def test_conv1d(self) -> None:
-        inputs = torch.tensor([[-1.75, -1.5, -1, -0.25, 1, 2.5, 3.75]])
-        actual_outputs = self.arithmetics.conv1d(
-            inputs=inputs,
-            weights=torch.ones(1, 1, 2),
-            bias=torch.ones(1),
-            stride=1,
-            padding="valid",
-            dilation=1,
-            groups=1,
-        )
-        target_outputs = torch.tensor([[-2.0, -1.5, -0.25, 1.75, 1.75, 1.75]])
-        self.assertTensorEqual(target_outputs, actual_outputs)
21 changes: 10 additions & 11 deletions tests/base_modules/autograd_functions/test_binary_quantization.py
@@ -1,4 +1,5 @@
 from types import SimpleNamespace
+from typing import cast
 
 import torch
 from torch import Tensor
@@ -9,6 +10,10 @@
 from tests.tensor_test_case import TensorTestCase
 
 
+def binarize(x: Tensor) -> Tensor:
+    return cast(Tensor, Binarize.apply(x))
+
+
 class BinarizeFunctionTest(TensorTestCase):
     def test_ForwardRaisesErrorOnMissingInput(self):
         def save_for_backward_dummy(_):
@@ -23,25 +28,19 @@ def save_for_backward_dummy(_):
             self.fail()
 
     def test_Yields1For0(self):
-        self.assertTensorEqual(
-            expected=Tensor([1.0]), actual=Binarize.apply(Tensor([0.0]))
-        )
+        self.assertTensorEqual(expected=Tensor([1.0]), actual=binarize(Tensor([0.0])))
 
     def test_Yields1For2Point4(self):
-        self.assertTensorEqual(
-            expected=Tensor([1.0]), actual=Binarize.apply(Tensor([2.4]))
-        )
+        self.assertTensorEqual(expected=Tensor([1.0]), actual=binarize(Tensor([2.4])))
 
     def test_YieldMinus1ForNegativeInput(self):
-        self.assertTensorEqual(
-            expected=Tensor([-1.0]), actual=Binarize.apply(Tensor([-2.8]))
-        )
+        self.assertTensorEqual(expected=Tensor([-1.0]), actual=binarize(Tensor([-2.8])))
 
     def check_gradient(self, expected_grad, x):
         x = torch.tensor([x], requires_grad=True)
-        y = Binarize.apply(x)
+        y = binarize(x)
         y.backward()
-        self.assertTensorEqual(torch.tensor([expected_grad]), x.grad)
+        self.assertTensorEqual(torch.tensor([expected_grad]), cast(Tensor, x.grad))
 
     def test_gradient_is_0_for_input_greater_1(self):
         self.check_gradient(expected_grad=0.0, x=1.1)
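The new binarize helper exists because torch.autograd.Function.apply is typed as returning Any, so every direct Binarize.apply(...) call site needed its own cast; a typed wrapper confines the cast to one place. A self-contained sketch of the same pattern, using a hypothetical Sign function rather than the real Binarize:

from typing import cast

import torch
from torch import Tensor


class Sign(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x: Tensor) -> Tensor:
        ctx.save_for_backward(x)
        return torch.where(x >= 0, torch.ones_like(x), -torch.ones_like(x))

    @staticmethod
    def backward(ctx, grad_output: Tensor) -> Tensor:
        (x,) = ctx.saved_tensors
        # Straight-through estimator: zero the gradient outside [-1, 1].
        return grad_output * (x.abs() <= 1).float()


def sign(x: Tensor) -> Tensor:
    # apply() returns Any; cast once here instead of at every call site.
    return cast(Tensor, Sign.apply(x))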
@@ -2,9 +2,11 @@
 
 import pytest
 import torch
-from fixed_point._round_to_fixed_point import RoundToFixedPoint
-from fixed_point._two_complement_fixed_point_config import FixedPointConfig
 
+from elasticai.creator.nn.fixed_point._round_to_fixed_point import RoundToFixedPoint
+from elasticai.creator.nn.fixed_point._two_complement_fixed_point_config import (
+    FixedPointConfig,
+)
 from tests.tensor_test_case import assertTensorEqual
3 changes: 2 additions & 1 deletion tests/nn/test_conv1d.py
@@ -1,5 +1,6 @@
 import torch
-from fixed_point.conv1d import FPConv1d
+
+from elasticai.creator.nn.fixed_point.conv1d import FPConv1d
 
 
 def make_conv1d() -> FPConv1d:
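With the corrected import path, constructing the layer might look like this; the keyword names come from the conv1d/layer.py diff above, while the concrete values and the assumption that no further arguments are required are hypothetical:

from elasticai.creator.nn.fixed_point.conv1d import FPConv1d

# Hypothetical configuration; parameter names taken from the layer diff.
conv = FPConv1d(
    total_bits=8,
    frac_bits=4,
    in_channels=1,
    out_channels=1,
    kernel_size=2,
    signal_length=7,
)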
4 changes: 3 additions & 1 deletion tests/nn/test_fp_precomputed_module.py
@@ -1,9 +1,11 @@
 from typing import cast
 
 import torch
-from fixed_point.precomputed.fp_precomputed_module import FPPrecomputedModule
 
 from elasticai.creator.file_generation.in_memory_path import InMemoryFile, InMemoryPath
+from elasticai.creator.nn.fixed_point.precomputed.fp_precomputed_module import (
+    FPPrecomputedModule,
+)
 
 
 def test_vhdl_code_matches_expected_for_tanh_as_base_module() -> None:
3 changes: 1 addition & 2 deletions tests/nn/test_hard_sigmoid.py
@@ -1,8 +1,7 @@
 from typing import cast
 
-from fixed_point.hard_sigmoid import FPHardSigmoid
-
 from elasticai.creator.file_generation.in_memory_path import InMemoryFile, InMemoryPath
+from elasticai.creator.nn.fixed_point.hard_sigmoid import FPHardSigmoid
 
 
 def test_vhdl_code_matches_expected() -> None:
3 changes: 1 addition & 2 deletions tests/nn/test_hard_tanh.py
@@ -1,8 +1,7 @@
 from typing import cast
 
-from fixed_point.hard_tanh import FPHardTanh
-
 from elasticai.creator.file_generation.in_memory_path import InMemoryFile, InMemoryPath
+from elasticai.creator.nn.fixed_point.hard_tanh import FPHardTanh
 
 
 def test_vhdl_code_matches_expected() -> None:
3 changes: 1 addition & 2 deletions tests/nn/test_relu.py
@@ -1,8 +1,7 @@
 from typing import cast
 
-from fixed_point.relu import FPReLU
-
 from elasticai.creator.file_generation.in_memory_path import InMemoryFile, InMemoryPath
+from elasticai.creator.nn.fixed_point.relu import FPReLU
 
 
 def test_vhdl_code_matches_expected() -> None:
