
Commit

Adding support and testing for chunk tensor operation (#90)
* Add support and test for chunk tensor op

* Fix for chunk tensor op

---------

Co-authored-by: SarahByrneIntel <sarahbyrne@intel.com>
SarahByrneIntel authored Jul 5, 2024
1 parent 66c1205 commit 13b9e3d
Showing 2 changed files with 71 additions and 1 deletion.
37 changes: 37 additions & 0 deletions intel_npu_acceleration_library/backend/tensor.py
@@ -899,6 +899,43 @@ def sum(
        sum = sum.to(dtype)
        return sum

    def chunk(
        self,
        chunks: int,
        dim: int = 0,
    ) -> Union["Tensor", list]:
        """
        Return the list of tensor chunks.

        Args:
            chunks (int): The number of chunks to return.
            dim (int): The dimension along which to split the tensor. Default is 0.

        Returns:
            Union["Tensor", list]: The resulting list of split tensors or a single tensor.

        Raises:
            ValueError: The input chunks value is not valid.
        """
        if chunks <= 0:
            raise ValueError("The input chunks value is not valid.")
        if chunks == 1:
            return self
        tensors = []
        remainder = self.shape[dim] % chunks
        chunk_size = self.shape[dim] // chunks + (1 if remainder > 0 else 0)
        num_dims = self.dim()

        start_idx = 0
        for _ in range(chunks):
            indexes = [slice(None)] * num_dims
            end_idx = start_idx + chunk_size
            end_idx = end_idx if end_idx < self.shape[dim] else self.shape[dim]
            indexes[dim] = slice(start_idx, end_idx)
            tensors.append(self.__getitem__(tuple(indexes)))
            start_idx = end_idx
        return tensors

    def to(self, dtype: NPUDtype) -> "Tensor":
        """
        Convert the tensor to the specified data type.
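For readers skimming the diff: the size arithmetic above follows the usual ceil-division pattern. Each chunk spans shape[dim] // chunks elements (plus one when there is a remainder), and the final slice is clamped to the end of the dimension. Below is a minimal standalone sketch of that slicing logic; the helper name chunk_sizes is hypothetical and not part of this commit.

# Sketch of the slicing arithmetic used by Tensor.chunk above.
# chunk_sizes is a hypothetical helper for illustration only, not library code.
def chunk_sizes(length: int, chunks: int) -> list:
    remainder = length % chunks
    # ceil division: every chunk gets this many elements, clamped at the end
    chunk_size = length // chunks + (1 if remainder > 0 else 0)
    sizes = []
    start = 0
    for _ in range(chunks):
        end = min(start + chunk_size, length)
        sizes.append(end - start)
        start = end
    return sizes

print(chunk_sizes(512, 4))  # [128, 128, 128, 128] -- even split
print(chunk_sizes(10, 3))   # [4, 4, 2] -- the final chunk is clamped and comes up short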
35 changes: 34 additions & 1 deletion test/python/test_tensor.py
@@ -270,7 +270,6 @@ def test_reduce_operations(batch, hidden_dim, axis, op):
    reference = eval(f"X.{op}(dim=axis)")
    reference = reference.numpy()

    print(X.sum())
    model = NNFactory()
    t1 = model.parameter(X.shape)
    _ = eval(f"t1.{op}()") if axis is None else eval(f"t1.{op}(dim=axis)")
@@ -326,3 +325,37 @@ def act(a, b):
        )
        < 0.001
    )


@pytest.mark.parametrize("batch", [16, 128])
@pytest.mark.parametrize("hidden_dim", [256, 512])
@pytest.mark.parametrize("chunks", [1, 2, 3, 4])
@pytest.mark.parametrize("axis", [0, 1, -1, -2])
def test_chunk_operation(batch, hidden_dim, chunks, axis):

X = torch.rand((batch, hidden_dim)).to(torch.float16)

reference = X.chunk(chunks=chunks, dim=axis)

model = NNFactory()
t1 = model.parameter(X.shape)
_ = t1.chunk(chunks=chunks, dim=axis)
model.compile()

result = model(X)

if chunks == 1:
assert np.isfinite(
reference[0].numpy()
).all(), "Pytorch Reference contains NaN or Inf"
assert np.isfinite(result.numpy()).all(), "NPU output contains NaN or Inf"
assert 1 - r2_score(reference[0].numpy(), result.numpy()) < 0.01
else:
for i in range(len(reference)):
assert np.isfinite(
reference[i].numpy()
).all(), "Pytorch Reference contains NaN or Inf"
assert np.isfinite(
result[i].numpy()
).all(), "NPU output contains NaN or Inf"
assert 1 - r2_score(reference[i].numpy(), result[i].numpy()) < 0.01
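
Outside the test, typical usage would presumably mirror the pattern above: build the graph, call chunk on a parameter, compile, then run. A hedged sketch follows, assuming NNFactory is importable from intel_npu_acceleration_library.backend (inferred from the file paths in this commit) and that the compiled model returns one output per chunk, as the test indicates.

# Usage sketch based on the test above; the import path for NNFactory is an
# assumption inferred from the intel_npu_acceleration_library/backend layout.
import torch
from intel_npu_acceleration_library.backend import NNFactory

x = torch.rand((16, 256)).to(torch.float16)

model = NNFactory()
t = model.parameter(x.shape)
t.chunk(chunks=2, dim=-1)  # request two (16, 128) halves along the last dim
model.compile()

outputs = model(x)  # one output per chunk, indexable as in the test
for out in outputs:
    print(out.numpy().shape)  # expected: (16, 128) for each half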
