def chunk(
    self,
    chunks: int,
    dim: int = 0,
) -> Union["Tensor", list]:
    """
    Split the tensor into chunks along the given dimension.

    Mirrors ``torch.chunk`` semantics: each chunk has size
    ``ceil(shape[dim] / chunks)`` except possibly the last, and when the
    dimension does not divide evenly FEWER than ``chunks`` tensors may be
    returned — empty chunks are never produced. (The previous
    implementation always emitted exactly ``chunks`` slices, appending
    zero-length chunks when ``ceil``-sized pieces exhausted the dimension
    early, e.g. size 6 split into 4 chunks.)

    Args:
        chunks (int): The number of chunks to return. Must be positive.
        dim (int): The dimension along which to split the tensor.
            Negative values index from the end. Default is 0.

    Returns:
        Union["Tensor", list]: The tensor itself if ``chunks == 1``,
            otherwise the list of chunk tensors.

    Raises:
        ValueError: The input chunks value is not valid.
    """
    if chunks <= 0:
        raise ValueError("The input chunks value is not valid.")
    if chunks == 1:
        # Nothing to split; torch also returns the whole tensor (as a
        # 1-tuple) in this case.
        return self

    size = self.shape[dim]
    # Ceil division without floats: largest chunk size torch would use.
    chunk_size = -(-size // chunks)
    num_dims = self.dim()

    tensors = []
    start_idx = 0
    # Stop as soon as the dimension is exhausted so no empty slice is
    # ever appended, even when chunks * chunk_size > size + chunk_size.
    while start_idx < size:
        end_idx = min(start_idx + chunk_size, size)
        indexes = [slice(None)] * num_dims
        indexes[dim] = slice(start_idx, end_idx)
        tensors.append(self.__getitem__(tuple(indexes)))
        start_idx = end_idx
    return tensors
@pytest.mark.parametrize("batch", [16, 128])
@pytest.mark.parametrize("hidden_dim", [256, 512])
@pytest.mark.parametrize("chunks", [1, 2, 3, 4])
@pytest.mark.parametrize("axis", [0, 1, -1, -2])
def test_chunk_operation(batch, hidden_dim, chunks, axis):
    """Compare NPU tensor.chunk output against the torch.chunk reference."""
    x = torch.rand((batch, hidden_dim)).to(torch.float16)
    reference = x.chunk(chunks=chunks, dim=axis)

    model = NNFactory()
    t1 = model.parameter(x.shape)
    _ = t1.chunk(chunks=chunks, dim=axis)
    model.compile()
    result = model(x)

    # With a single chunk the model returns one tensor; otherwise a
    # sequence indexable in parallel with the torch reference.
    outputs = [result] if chunks == 1 else [result[i] for i in range(len(reference))]

    for ref, out in zip(reference, outputs):
        assert np.isfinite(
            ref.numpy()
        ).all(), "Pytorch Reference contains NaN or Inf"
        assert np.isfinite(out.numpy()).all(), "NPU output contains NaN or Inf"
        assert 1 - r2_score(ref.numpy(), out.numpy()) < 0.01