Commit 10356b8 (1 parent: fbc8a9d)
Showing 2 changed files with 107 additions and 2 deletions.
tests/ttnn/unit_tests/operations/eltwise/test_binary_comp_init.py (101 additions, 0 deletions)
@@ -0,0 +1,101 @@
# SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.

# SPDX-License-Identifier: Apache-2.0

import torch
import pytest
import ttnn
from tests.ttnn.unit_tests.operations.eltwise.backward.utility_funcs import data_gen_with_range
from models.utility_functions import is_grayskull, skip_for_blackhole


@skip_for_blackhole("Mismatching on BH, see #12349")
@pytest.mark.parametrize(
    "input_shapes",
    ((torch.Size([1, 1, 32, 32])),),
)
@pytest.mark.parametrize(
    "mem_configs",
    (
        ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.DRAM),
        ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.L1),
    ),
)
@pytest.mark.parametrize("out_dtype", (ttnn.uint32, ttnn.uint16))
@pytest.mark.parametrize(
    "ttnn_function",
    (ttnn.lt, ttnn.gt, ttnn.eq, ttnn.le, ttnn.ge, ttnn.ne, ttnn.logical_and, ttnn.logical_or, ttnn.logical_xor),
)
def test_binary_comp_ops(input_shapes, out_dtype, mem_configs, ttnn_function, device):
    if is_grayskull():
        pytest.skip("GS does not support fp32/uint32/uint16 data types")

    in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)
    other_data, other_tensor = data_gen_with_range(input_shapes, -90, 100, device, True)

    cq_id = 0
    mem_cfg = mem_configs

    tt_output_tensor_on_device = ttnn_function(
        input_tensor, other_tensor, memory_config=mem_cfg, dtype=out_dtype, queue_id=cq_id
    )

    golden_fn = ttnn.get_golden_function(ttnn_function)
    golden_tensor = golden_fn(in_data, other_data)
    golden_tensor = golden_tensor.int()

    output_tensor = ttnn.to_torch(tt_output_tensor_on_device)

    are_equal = torch.equal(output_tensor, golden_tensor)
    assert are_equal


@skip_for_blackhole("Mismatching on BH, see #12349")
@pytest.mark.parametrize(
    "input_shapes",
    ((torch.Size([1, 1, 32, 32])),),
)
@pytest.mark.parametrize(
    "mem_configs",
    (
        ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.DRAM),
        ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.L1),
    ),
)
@pytest.mark.parametrize(
    "scalar",
    {2.3, 15.6, 55.4, 72.5, 120.6},
)
@pytest.mark.parametrize("out_dtype", (ttnn.uint32, ttnn.uint16))
@pytest.mark.parametrize(
    "ttnn_function",
    (
        ttnn.lt,
        ttnn.gt,
        ttnn.eq,
        ttnn.le,
        ttnn.ge,
        ttnn.ne,
    ),
)
def test_binary_comp_ops_scalar(input_shapes, scalar, out_dtype, mem_configs, ttnn_function, device):
    if is_grayskull():
        pytest.skip("GS does not support fp32/uint32/uint16 data types")

    in_data, input_tensor = data_gen_with_range(input_shapes, -100, 100, device, True)

    cq_id = 0
    mem_cfg = mem_configs

    tt_output_tensor_on_device = ttnn_function(
        input_tensor, scalar, memory_config=mem_cfg, dtype=out_dtype, queue_id=cq_id
    )

    golden_fn = ttnn.get_golden_function(ttnn_function)
    golden_tensor = golden_fn(in_data, scalar)
    golden_tensor = golden_tensor.int()

    output_tensor = ttnn.to_torch(tt_output_tensor_on_device)

    are_equal = torch.equal(output_tensor, golden_tensor)
    assert are_equal
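
For context (not part of the commit itself), below is a minimal, self-contained sketch of the comparison pattern these tests exercise, using ttnn.gt with a uint16 output as one example. The device id, tile layout, and the bfloat16 rounding of the inputs are illustrative assumptions here; the tests above instead use the data_gen_with_range helper and pytest parametrization to cover both memory configs, both output dtypes, and the full set of comparison and logical ops.

# Illustrative sketch (not from the commit): compare one ttnn comparison op
# against its registered golden (torch) implementation, mirroring the tests above.
import torch
import ttnn

device = ttnn.open_device(device_id=0)  # assumes a single attached, non-Grayskull device

# Generate inputs and round them to bfloat16-representable values so the torch
# reference operates on exactly the values the device sees.
torch_a = (torch.rand(1, 1, 32, 32) * 200 - 100).to(torch.bfloat16).float()
torch_b = (torch.rand(1, 1, 32, 32) * 190 - 90).to(torch.bfloat16).float()

a = ttnn.from_torch(torch_a, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device)
b = ttnn.from_torch(torch_b, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device)

# Device result with an explicit integer output dtype, as in the new tests.
tt_out = ttnn.gt(a, b, dtype=ttnn.uint16)

# Reference result via the op's golden (torch) implementation, cast to int as in the tests.
golden = ttnn.get_golden_function(ttnn.gt)(torch_a, torch_b).int()

assert torch.equal(ttnn.to_torch(tt_out), golden)

ttnn.close_device(device)

The .int() cast on the golden tensor matches the tests above, which compare the integer device output against an integer torch reference using torch.equal rather than an approximate check.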