#14448: Update examples of binary ops #14468

Merged
merged 1 commit into from
Oct 30, 2024
56 changes: 28 additions & 28 deletions ttnn/cpp/ttnn/operations/eltwise/binary/binary_pybind.hpp
@@ -44,8 +44,8 @@ void bind_primitive_binary_operation(py::module& module, const binary_operation_

Example:

- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device)
>>> output = {1}(tensor1, tensor2)
Contributor:
What was the issue? Was it the zero, or the format in which it was passed?

Contributor Author:
The format that we passed. torch.tensor((1, 2), dtype=torch.bfloat16) creates a rank-1 tensor with elements [1, 2]. According to the documentation, most of the ops support ranks 2, 3, and 4. That's why we passed the input as

[[1, 2],
 [3, 4]]
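
As an aside, a minimal plain-PyTorch check (not part of this PR's diff) that illustrates the rank difference described above:

>>> import torch
>>> torch.tensor((1, 2), dtype=torch.bfloat16).shape            # flat tuple -> rank 1
torch.Size([2])
>>> torch.tensor([[1, 2], [3, 4]], dtype=torch.bfloat16).shape  # nested list -> rank 2
torch.Size([2, 2])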

)doc",
operation.base_name(),
@@ -110,8 +110,8 @@ void bind_binary_operation(py::module& module, const binary_operation_t& operati

Example:

- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device=device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> output = {1}(tensor1, tensor2)
)doc",
operation.base_name(),
@@ -195,8 +195,8 @@ void bind_binary_composite(py::module& module, const binary_operation_t& operati
{4}

Example:
- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device=device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> output = {1}(tensor1, tensor2)

)doc",
@@ -246,8 +246,8 @@ void bind_binary_composite_with_alpha(py::module& module, const binary_operation

Example:

- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device=device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> output = {1}(tensor1, tensor2, alpha)
)doc",
operation.base_name(),
@@ -286,9 +286,9 @@ void bind_binary_composite_with_rtol_atol(py::module& module, const binary_opera
Args:
input_tensor_a (ttnn.Tensor): the input tensor.
input_tensor_b (ttnn.Tensor): the input tensor.
- rtol (float)
- atol (float)
- equal_nan (bool)
+ rtol (float): relative tolerance.
+ atol (float): absolute tolerance.
+ equal_nan (bool): if NaN values should be treated as equal during comparison.

Keyword args:
memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`.
@@ -297,8 +297,8 @@ void bind_binary_composite_with_rtol_atol(py::module& module, const binary_opera
ttnn.Tensor: the output tensor.

Example:
- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device=device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> output = {1}(tensor1, tensor2, rtol, atol, equal_nan)

)doc",
@@ -352,8 +352,8 @@ void bind_binary_composite_overload(py::module& module, const binary_operation_t

Example:

- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device)
>>> output = {1}(tensor1, tensor2/scalar)
)doc",
operation.base_name(),
@@ -414,8 +414,8 @@ void bind_div(py::module& module, const binary_operation_t& operation, const std
ttnn.Tensor: the output tensor.

Example:
- >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
- >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor((0, 1), dtype=torch.bfloat16)), device=device)
+ >>> tensor1 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
+ >>> tensor2 = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> output = {1}(tensor1, tensor2/scalar)

)doc",
@@ -481,7 +481,7 @@ void bind_polyval(py::module& module, const binary_operation_t& operation, const

Args:
input_tensor (ttnn.Tensor): the input tensor.
- Coeffs (Vector of floats).
+ Coeffs (Vector of floats): coefficients of the polynomial.

Keyword args:
memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`.
@@ -493,7 +493,7 @@ void bind_polyval(py::module& module, const binary_operation_t& operation, const
{4}

Example:
- >>> tensor = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device)
+ >>> tensor = ttnn.to_device(ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16)), device=device)
>>> coeffs = (1, 2, 3, 4)
>>> output = {1}(tensor, coeffs)

@@ -538,8 +538,8 @@ void bind_binary_overload_operation(py::module& module, const binary_operation_t
Returns:
ttnn.Tensor: the output tensor.

- Example::
- >>> tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
+ Example:
+ >>> tensor = ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16), device=device)
>>> output = {1}(tensor1, tensor2)
)doc",
operation.base_name(),
@@ -590,10 +590,10 @@ void bind_inplace_operation(py::module& module, const binary_operation_t& operat
input_tensor_b (ttnn.Tensor or Number): the input tensor.

Returns:
- List of ttnn.Tensor: the output tensor.
+ ttnn.Tensor: the output tensor.

- Example::
- >>> tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
+ Example:
+ >>> tensor = ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16), device=device)
>>> output = {1}(tensor1, tensor2)
)doc",
operation.base_name(),
@@ -639,14 +639,14 @@ void bind_logical_inplace_operation(py::module& module, const binary_operation_t
input_tensor_b (ttnn.Tensor): the input tensor.

Returns:
- List of ttnn.Tensor: the output tensor.
+ ttnn.Tensor: the output tensor.

Note:
{4}

Example:
- >>> tensor1 = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
- >>> tensor2 = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
+ >>> tensor1 = ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16), device=device)
+ >>> tensor2 = ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16), device=device)
>>> output = {1}(tensor1, tensor2)
)doc",
operation.base_name(),
@@ -681,7 +681,7 @@ void bind_binary_inplace_operation(py::module& module, const binary_operation_t&
Keyword args:
* :attr:`activations` (Optional[List[str]]): list of activation functions to apply to the output tensor
Example::
- >>> tensor = ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16), device=device)
+ >>> tensor = ttnn.from_torch(torch.tensor(([[1, 2], [3, 4]]), dtype=torch.bfloat16), device=device)
>>> output = {1}(tensor1, tensor2)
)doc",
operation.base_name(),