From 358aa07d711a25bf59fde7245d386ef324cbb65a Mon Sep 17 00:00:00 2001 From: mcw-anasuya Date: Fri, 8 Nov 2024 14:33:03 +0000 Subject: [PATCH] #14782: Update new table structure --- docs/source/ttnn/ttnn/api.rst | 3 +- .../unary_backward/unary_backward_pybind.hpp | 1078 ++++++----------- 2 files changed, 394 insertions(+), 687 deletions(-) diff --git a/docs/source/ttnn/ttnn/api.rst b/docs/source/ttnn/ttnn/api.rst index e91e24ac3976..722487c57708 100644 --- a/docs/source/ttnn/ttnn/api.rst +++ b/docs/source/ttnn/ttnn/api.rst @@ -194,6 +194,7 @@ Pointwise Unary ttnn.threshold ttnn.trunc ttnn.clamp_bw + ttnn.clip_bw ttnn.hardtanh_bw ttnn.threshold_bw ttnn.softplus_bw @@ -203,7 +204,6 @@ Pointwise Unary ttnn.exp_bw ttnn.tanh_bw ttnn.sqrt_bw - ttnn.assign_bw ttnn.multigammaln_bw ttnn.lgamma_bw ttnn.fill_bw @@ -341,6 +341,7 @@ Pointwise Binary ttnn.scatter ttnn.atan2 ttnn.add_bw + ttnn.assign_bw ttnn.atan2_bw ttnn.div_bw ttnn.embedding_bw diff --git a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp index c42ea9e5a960..6f80919442f6 100644 --- a/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp +++ b/ttnn/cpp/ttnn/operations/eltwise/unary_backward/unary_backward_pybind.hpp @@ -22,7 +22,11 @@ namespace detail { template void bind_unary_backward_two_float( - py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( @@ -35,13 +39,25 @@ void bind_unary_backward_two_float( value (float): the input value. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. 
+ memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - TILE + - 2, 3, 4 + + {4} Example: @@ -52,7 +68,8 @@ void bind_unary_backward_two_float( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -77,7 +94,12 @@ void bind_unary_backward_two_float( template void bind_unary_backward_op( - py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string& supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string& description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& layout = "TILE", + const std::string& note = "") { auto doc = fmt::format( R"doc( {2} @@ -87,13 +109,25 @@ void bind_unary_backward_op( input_tensor_a (ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. 
list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - {4} + - 2, 3, 4 + + {5} Example: @@ -104,7 +138,9 @@ void bind_unary_backward_op( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + layout, + note); bind_registered_operation( module, @@ -125,7 +161,11 @@ void bind_unary_backward_op( template void bind_unary_backward_rsqrt( - py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( @@ -136,7 +176,7 @@ void bind_unary_backward_rsqrt( input_tensor (ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. queue_id (uint8, optional): command queue id. Defaults to `0`. @@ -144,7 +184,19 @@ void bind_unary_backward_rsqrt( List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. 
list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - TILE + - 2, 3, 4 + + {4} Example: @@ -155,7 +207,8 @@ void bind_unary_backward_rsqrt( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -180,7 +233,11 @@ void bind_unary_backward_rsqrt( template void bind_unary_backward_op_reciprocal( - py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string_view supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string& description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {2} @@ -190,13 +247,25 @@ void bind_unary_backward_op_reciprocal( input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. 
list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - TILE + - 2, 3, 4 + + {4} Example: @@ -207,7 +276,8 @@ void bind_unary_backward_op_reciprocal( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -240,7 +310,11 @@ void bind_unary_backward_op_reciprocal( template void bind_unary_backward_op_overload_abs( - py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {2} @@ -250,13 +324,25 @@ void bind_unary_backward_op_overload_abs( input_tensor (ComplexTensor or ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - TILE + - 2, 3, 4 + + {4} Example: @@ -267,7 +353,8 @@ void bind_unary_backward_op_overload_abs( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -317,7 +404,7 @@ void bind_unary_backward_float( {3} (float): {4}. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. 
@@ -383,25 +470,38 @@ void bind_unary_backward_two_float_with_default( const std::string& parameter_b_doc, float parameter_b_value, const std::string_view description, - const std::string_view supported_dtype = "") { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note="") { auto doc = fmt::format( R"doc( {8} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. - input_tensor (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - {2} (float, optional): {3} , Defaults to {4}. - {5} (float, optional): {6} , Defaults to {7}. - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + {2} (float, optional): {3}. Defaults to `{4}`. + {5} (float, optional): {6}. Defaults to `{7}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {9} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {9} + - TILE + - 2, 3, 4 + + {10} Example: @@ -418,7 +518,8 @@ void bind_unary_backward_two_float_with_default( parameter_b_doc, parameter_b_value, description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -449,7 +550,8 @@ void bind_unary_backward_float_with_default( const std::string& parameter_a_doc, float parameter_a_value, const std::string& description, - const std::string_view supported_dtype = "") { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note="") { auto doc = fmt::format( R"doc( {5} @@ -460,14 +562,26 @@ void bind_unary_backward_float_with_default( Keyword args: - {2} (float, optional): {3} , Defaults to {4} - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. 
+ {2} (float, optional): {3}. Defaults to `{4}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {6} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {6} + - TILE + - 2, 3, 4 + + {7} Example: @@ -481,7 +595,8 @@ void bind_unary_backward_float_with_default( parameter_a_doc, parameter_a_value, description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -513,7 +628,7 @@ void bind_unary_backward_optional_float_params_with_default( const std::string& parameter_b_doc, std::optional parameter_b_value, const std::string& description, - const std::string& suported_dtype = "BFLOAT16", + const std::string& supported_dtype = "BFLOAT16", const std::string& note = "") { auto doc = fmt::format( R"doc( @@ -524,9 +639,9 @@ void bind_unary_backward_optional_float_params_with_default( input_tensor (ttnn.Tensor): the input tensor. Keyword args: - {2} (float, optional): {3} , Default value = {4} - {5} (float, optional): {6} , Default value = {7} - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + {2} (float, optional): {3}. Defaults to `{4}`. + {5} (float, optional): {6}. Defaults to `{7}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. 
@@ -562,7 +677,7 @@ void bind_unary_backward_optional_float_params_with_default( parameter_b_doc, parameter_b_value, description, - suported_dtype, + supported_dtype, note); bind_registered_operation( @@ -596,7 +711,8 @@ void bind_unary_backward_float_string_default( const std::string& parameter_b_doc, string parameter_b_value, const std::string_view description, - const std::string_view supported_dtype = "") { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {7} @@ -607,14 +723,26 @@ void bind_unary_backward_float_string_default( {2} (float): {3}. Keyword args: - {4} (string, optional): {5} , Defaults to {6}. - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + {4} (string, optional): {5}. Defaults to `{6}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {8} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {8} + - TILE + - 2, 3, 4 + + {9} Example: @@ -630,7 +758,8 @@ void bind_unary_backward_float_string_default( parameter_b_doc, parameter_b_value, description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -660,22 +789,39 @@ void bind_unary_backward_string_default( const std::string& parameter_name_a, const std::string& parameter_a_doc, string parameter_a_value, - const std::string_view description) { + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {5} Args: grad_tensor (ttnn.Tensor): the input gradient tensor. - input_tensor_a (ttnn.Tensor): the input tensor. + input_tensor (ttnn.Tensor): the input tensor. 
Keyword args: - {2} (string, optional): {3} , Defaults to {4} - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + {2} (string, optional): {3}. Defaults to `{4}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. + Note: + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {6} + - TILE + - 2, 3, 4 + + {7} + Example: >>> grad_tensor = ttnn.to_device(ttnn.from_torch(torch.tensor((1, 2), dtype=torch.bfloat16)), device=device) @@ -687,7 +833,9 @@ void bind_unary_backward_string_default( parameter_name_a, parameter_a_doc, parameter_a_value, - description); + description, + supported_dtype, + note); bind_registered_operation( module, @@ -715,7 +863,8 @@ void bind_unary_backward_unary_optional_float( const std::string& parameter_name, const std::string& parameter_doc, const std::string_view description, - const std::string_view supported_dtype = "") { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {4} @@ -726,15 +875,27 @@ void bind_unary_backward_unary_optional_float( {2} (Number): {3}. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. - output_tensor (ttnn.Tensor, optional): Preallocated output tensor. Defaults to `None`. - queue_id (int, optional): Command queue id. Defaults to `0`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. + queue_id (int, optional): command queue id. Defaults to `0`. Returns: List of ttnn.Tensor: the output tensor. Note: - {5} + Supported dtypes, layouts, and ranks: + + .. 
list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {5} + - TILE + - 2, 3, 4 + + {6} Example: @@ -747,7 +908,8 @@ void bind_unary_backward_unary_optional_float( parameter_name, parameter_doc, description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -780,7 +942,8 @@ void bind_unary_backward_shape( const std::string& parameter_name_a, const std::string& parameter_a_doc, const std::string_view description, - const std::string_view supported_dtype) { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {4} @@ -791,13 +954,25 @@ void bind_unary_backward_shape( {2} (List[int]): {3}. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {5} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {5} + - TILE + - 4 + + {6} Example: @@ -810,7 +985,8 @@ void bind_unary_backward_shape( parameter_name_a, parameter_a_doc, description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -833,18 +1009,23 @@ void bind_unary_backward_shape( template void bind_unary_backward_optional( - py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string& supported_dtype = "BFLOAT16", const std::string& layout = "TILE", const std::string& note="") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& layout = "TILE", + const std::string& note="") { auto doc = fmt::format( R"doc( {2} Args: grad_tensor (ttnn.Tensor): the input gradient tensor. 
- input_tensor_a (ttnn.Tensor): the input tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. - output_tensor (ttnn.Tensor, optional): Preallocated output tensor. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. Returns: @@ -901,25 +1082,41 @@ void bind_unary_backward_optional( template void bind_unary_backward_neg( - py::module& module, const unary_backward_operation_t& operation, const std::string_view description, const std::string_view supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string_view description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {2} Args: - grad_tensor (ComplexTensor or ttnn.Tensor): the input gradient tensor. - input_tensor_a (ComplexTensor or ttnn.Tensor): the input tensor. + grad_tensor (ttnn.Tensor): the input gradient tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. - output_tensor (ttnn.Tensor, optional): Preallocated output tensor. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. + output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. queue_id (int, optional): command queue id. Defaults to `0`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. 
list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - TILE + - 2, 3, 4 + + {4} Example: @@ -930,7 +1127,8 @@ void bind_unary_backward_neg( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + note); bind_registered_operation( module, @@ -957,16 +1155,16 @@ template void bind_unary_backward_prod_bw(py::module& module, const unary_backward_operation_t& operation) { auto doc = fmt::format( R"doc( - Performs backward operations for prod on input along `all_dimensions` or a particular `dim`. + Performs backward operations for prod on :attr:`input_tensor` with given :attr:`grad_tensor` along `all_dimensions` or a particular `dim`. Args: grad_tensor (ttnn.Tensor): the input gradient tensor. - input_tensor_a (ttnn.Tensor): the input tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: all_dimensions (bool, optional): perform prod backward along all dimensions, ignores dim param. Defaults to `True`. dim (int, optional): dimension to perform prod backward. Defaults to `0`. - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. @@ -1027,7 +1225,7 @@ void bind_unary_backward_opt(py::module& module, const unary_backward_operation_ Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. 
Example: @@ -1063,23 +1261,40 @@ void bind_unary_backward_opt(py::module& module, const unary_backward_operation_ template void bind_unary_backward( - py::module& module, const unary_backward_operation_t& operation, const std::string& description, const std::string& supported_dtype = "") { + py::module& module, + const unary_backward_operation_t& operation, + const std::string& description, + const std::string& supported_dtype = "BFLOAT16", + const std::string& layout = "TILE", + const std::string& note = "") { auto doc = fmt::format( R"doc( {2} Args: grad_tensor (ttnn.Tensor): the input gradient tensor. - input_tensor_a (ttnn.Tensor): the input tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. Defaults to `None`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. Returns: List of ttnn.Tensor: the output tensor. Note: - {3} + Supported dtypes, layouts, and ranks: + + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {3} + - {4} + - 2, 3, 4 + + {5} Example: @@ -1090,7 +1305,9 @@ void bind_unary_backward( operation.base_name(), operation.python_fully_qualified_name(), description, - supported_dtype); + supported_dtype, + layout, + note); bind_registered_operation( module, @@ -1119,18 +1336,19 @@ void bind_unary_backward_gelu( const std::string& parameter_a_doc, string parameter_a_value, const std::string_view description, - const std::string_view supported_dtype = "") { + const std::string& supported_dtype = "BFLOAT16", + const std::string& note = "") { auto doc = fmt::format( R"doc( {5} Args: grad_tensor (ttnn.Tensor): the input gradient tensor. - input_tensor_a (ttnn.Tensor): the input tensor. + input_tensor (ttnn.Tensor): the input tensor. Keyword args: - {2}` (string): {3} , Defaults to {4}. - memory_config (ttnn.MemoryConfig, optional): Memory configuration for the operation. 
Defaults to `None`. + {2} (string): {3}. Defaults to `{4}`. + memory_config (ttnn.MemoryConfig, optional): memory configuration for the operation. Defaults to `None`. output_tensor (ttnn.Tensor, optional): preallocated output tensor. Defaults to `None`. queue_id (uint8, optional): command queue id. Defaults to `0`. @@ -1138,8 +1356,19 @@ void bind_unary_backward_gelu( List of ttnn.Tensor: the output tensor. Note: - {6} + Supported dtypes, layouts, and ranks: + .. list-table:: + :header-rows: 1 + + * - Dtypes + - Layouts + - Ranks + * - {6} + - TILE + - 2, 3, 4 + + {7} Example: @@ -1153,7 +1382,9 @@ void bind_unary_backward_gelu( parameter_a_doc, parameter_a_value, description, - supported_dtype); + supported_dtype, + note); + bind_registered_operation( module, operation, @@ -1189,7 +1420,7 @@ void py_module(py::module& module) { "max", "Maximum value", std::nullopt, - R"doc(Performs backward operations for clamp value on :attr:`input_tensor`, :attr:`min`, :attr:`max` with given :attr:`grad_tensor`.)doc", + R"doc(Performs backward operations for clamp on :attr:`input_tensor`, :attr:`min`, :attr:`max` with given :attr:`grad_tensor`.)doc", R"doc(BFLOAT16)doc", R"doc(Only one of `min` or `max` value can be `None`.)doc"); @@ -1215,14 +1446,7 @@ void py_module(py::module& module) { "max", "Maximum value", 1.0, - R"doc(Performs backward operations for hardtanh activation function on :attr:`input_tensor`, :attr:`min`, :attr:`max` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - )doc"); + R"doc(Performs backward operations for hardtanh activation function on :attr:`input_tensor`, :attr:`min`, :attr:`max` with given 
:attr:`grad_tensor`.)doc"); detail::bind_unary_backward_float_with_default( @@ -1232,15 +1456,7 @@ void py_module(py::module& module) { "Lambda value for the hardshrink formula ", 0.5, R"doc(Performs backward operations for hardshrink on :attr:`input_tensor`, :attr:`lambd`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_float_with_default( module, @@ -1248,16 +1464,7 @@ void py_module(py::module& module) { "lambd", "Lambda value for the softshrink formula ", 0.5, - R"doc(Performs backward operations for softshrink on :attr:`input_tensor`, :attr:`lambd`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for softshrink on :attr:`input_tensor`, :attr:`lambd`, with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_float_with_default( module, @@ -1266,15 +1473,7 @@ void py_module(py::module& module) { "negative_slope value for the hardshrink formula ", 0.01, R"doc(Performs backward operations for leaky_relu on :attr:`input_tensor`, :attr:`negative_slope`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_float_with_default( module, @@ -1283,15 +1482,7 @@ void py_module(py::module& module) { "alpha value for the elu formula ", 1.0, R"doc(Performs backward operations for elu on :attr:`input_tensor`, :attr:`alpha`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_float_with_default( module, @@ -1299,16 +1490,7 @@ void py_module(py::module& module) { "alpha", "alpha value for the celu formula ", 1.0, - R"doc(Performs backward operations for celu on :attr:`input_tensor`, :attr:`alpha`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for celu on :attr:`input_tensor`, :attr:`alpha`, with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_float_with_default( module, @@ -1316,30 +1498,12 @@ void py_module(py::module& 
module) { "eps", "eps value for the logiteps formula ", 0.0, - R"doc(Performs backward operations for logiteps on :attr:`input_tensor`, :attr:`eps`, with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for logiteps on :attr:`input_tensor`, :attr:`eps`, with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_two_float( module, ttnn::threshold_bw, - R"doc(Performs backward operations for threshold on :attr:`input_tensor`, :attr:`threshold`, :attr:`value` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for threshold on :attr:`input_tensor`, :attr:`threshold`, :attr:`value` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_two_float_with_default( module, @@ -1350,16 +1514,7 @@ void py_module(py::module& module) { "threshold", "Threshold value", 20.0, - R"doc(Performs backward operations for softplus on :attr:`input_tensor`, :attr:`beta`, :attr:`threshold` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ 
- | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for softplus on :attr:`input_tensor`, :attr:`beta`, :attr:`threshold` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_float_string_default( module, @@ -1370,32 +1525,14 @@ void py_module(py::module& module) { "Mode of Rounding", "None", R"doc(Performs backward operations for Unary rdiv on :attr:`input_tensor`, :attr:`scalar` with given :attr:`grad_tensor` using given :attr:`round_mode`. - :attr:`round_mode` can be 'None', 'trunc', or 'floor'.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + :attr:`round_mode` can be 'None', 'trunc', or 'floor'.)doc"); detail::bind_unary_backward_shape( module, ttnn::repeat_bw, "shape", "Shape of tensor", - R"doc(Performs backward operations for repeat on :attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`shape`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for repeat on :attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`shape`.)doc"); detail::bind_unary_backward_gelu( module, @@ -1403,33 +1540,16 @@ void py_module(py::module& module) { "approximate", "Approximation type", "none", - R"doc(Performs 
backward operations for gelu on :attr:`input_tensor_a` or :attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`approximate` mode. - :attr:`approximate` mode can be 'none', 'tanh'.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for gelu on :attr:`input_tensor`, with given :attr:`grad_tensor` using given :attr:`approximate` mode. + :attr:`approximate` mode can be 'none', 'tanh'.)doc"); detail::bind_unary_backward_unary_optional_float( module, ttnn::pow_bw, "exponent", "Exponent value [must be non-negative]", - R"doc(Performs backward operations for power on :attr:`input_tensor` , :attr:`exponent` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for power on :attr:`input_tensor`, :attr:`exponent` with given :attr:`grad_tensor`.)doc", + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_optional( module, @@ -1450,34 +1570,17 @@ void py_module(py::module& module) { detail::bind_unary_backward( module, ttnn::multigammaln_bw, - R"doc(Performs backward operations for multigammaln on :attr:`input_tensor` with given :attr:`grad_tensor` and value of P is taken as 4. - mvlgamma is refered as multigammaln. 
- Input value must be greater than 2.5f)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for multivariate logarithmic gamma function (also referred to as mvlgamma) on :attr:`input_tensor` with given :attr:`grad_tensor`. + The dimensionality is set to 4.)doc", + R"doc(BFLOAT16)doc", R"doc(TILE)doc", + R"doc(Input value must be greater than 2.5f)doc"); detail::bind_unary_backward_prod_bw(module, ttnn::prod_bw); detail::bind_unary_backward( module, ttnn::lgamma_bw, - R"doc(Performs backward operations for lgamma on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for lgamma on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_optional( module, @@ -1488,256 +1591,107 @@ void py_module(py::module& module) { detail::bind_unary_backward( module, ttnn::hardsigmoid_bw, - R"doc(Performs backward operations for hardsigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 
| TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for hardsigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward( module, ttnn::cos_bw, R"doc(Performs backward operations for cosine on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward( module, ttnn::acosh_bw, - R"doc(Performs backward operations for inverse hyperbolic cosine (acosh) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for inverse hyperbolic cosine (acosh) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_op( module, ttnn::acos_bw, - R"doc(Performs backward operations for inverse cosine (acos) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | 
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(Performs backward operations for inverse cosine (acos) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::atan_bw,
- R"doc(Performs backward operations for atan on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 |
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(Performs backward operations for inverse tangent (atan) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
+ R"doc(BFLOAT16, BFLOAT8_B)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::rad2deg_bw,
 R"doc(Performs backward operations for radian to degree conversion (rad2deg) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 |
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(BFLOAT16, BFLOAT8_B)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::frac_bw,
 R"doc(Performs backward operations for frac on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16 | 
TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc", + R"doc(BFLOAT8_B is supported for TILE layout.)doc"); detail::bind_unary_backward_op( module, ttnn::trunc_bw, R"doc(Performs backward operations for truncation on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc"); detail::bind_unary_backward_op( module, ttnn::log_sigmoid_bw, R"doc(Performs backward operations for log sigmoid on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::fill_zero_bw, - R"doc(Performs backward operations of fill zero on :attr:`input_tensor` with given :attr:`grad_tensor`. 
Returns an tensor of zeros like :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 |
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(Performs backward operations for fill zero on :attr:`input_tensor` with given :attr:`grad_tensor`. Returns a tensor of zeros like :attr:`grad_tensor`.)doc",
+ R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::i0_bw,
 R"doc(Performs backward operations for i0 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 |
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(BFLOAT16, BFLOAT8_B)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::tan_bw,
 R"doc(Performs backward operations for tan on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc",
- R"doc(Supported dtypes, layouts, and ranks:
-
- +----------------------------+---------------------------------+-------------------+
- | Dtypes | Layouts | Ranks |
- +----------------------------+---------------------------------+-------------------+
- | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 |
- +----------------------------+---------------------------------+-------------------+
-
- )doc");
+ R"doc(BFLOAT16, BFLOAT8_B)doc");

 detail::bind_unary_backward_op(
 module,
 ttnn::sigmoid_bw,
 R"doc(Performs backward operations for sigmoid on :attr:`input_tensor` with given 
:attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_rsqrt( module, ttnn::rsqrt_bw, - R"doc(Performs backward operations for rsqrt on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for reciprocal of square-root on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_neg( module, ttnn::neg_bw, R"doc(Performs backward operations for neg on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward( module, ttnn::relu_bw, R"doc(Performs backward operations for relu on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward( module, ttnn::logit_bw, - R"doc(Performs backward operations for logit on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for logit on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward( module, ttnn::floor_bw, R"doc(Performs backward operations for floor on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc"); detail::bind_unary_backward_float( module, @@ -1750,309 +1704,120 @@ void py_module(py::module& module) { module, ttnn::round_bw, R"doc(Performs backward operations for round on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc"); detail::bind_unary_backward( module, ttnn::log_bw, - R"doc(Performs backward operations for logarithm on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for logarithm on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::relu6_bw, - R"doc(Performs backward operations for relu6 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for relu6 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op_overload_abs( module, ttnn::abs_bw, R"doc(Performs backward operations for abs on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - 
+----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_neg( module, ttnn::silu_bw, - R"doc(Performs backward operations for silu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for silu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::selu_bw, R"doc(Performs backward operations for selu on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::square_bw, R"doc(Performs backward operations for square on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::hardswish_bw, - R"doc(Performs backward operations for hardswish on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for hardswish on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::tanhshrink_bw, R"doc(Performs backward operations for tanhshrink on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::atanh_bw, - R"doc(Performs backward operations for atanh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - 
+----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for inverse hyperbolic tangent (atanh) on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::asin_bw, - R"doc(Performs backward operations for asin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for inverse sine (asin) on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::asinh_bw, - R"doc(Performs backward operations for asinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for inverse hyperbolic sine (asinh) on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::sin_bw, R"doc(Performs backward operations for sin on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::sinh_bw, - R"doc(Performs backward operations for sinh on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for hyperbolic sine (sinh) on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::log10_bw, - R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for log10 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::log1p_bw, - R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for log1p on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::erfc_bw, - R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for erfc on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::ceil_bw, R"doc(Performs backward operations for ceil on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc"); detail::bind_unary_backward_op( module, ttnn::softsign_bw, - R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for softsign on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::cosh_bw, - R"doc(Performs backward operations for cosh on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for hyperbolic cosine (cosh) on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_op( module, ttnn::log2_bw, - R"doc(Performs backward operations for log2 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for log2 on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc"); detail::bind_unary_backward_op( module, ttnn::sign_bw, R"doc(Performs backward operations for sign on :attr:`input_tensor` with given :attr:`grad_tensor`.)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE, ROW_MAJOR | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16)doc", R"doc(TILE, ROW MAJOR)doc"); detail::bind_unary_backward_float( module, @@ -2065,104 +1830,45 @@ void py_module(py::module& module) { module, ttnn::exp2_bw, R"doc(Performs backward operations for exp2 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::expm1_bw, - R"doc(Performs backward operations for exp2 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for expm1 on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op_reciprocal( module, ttnn::reciprocal_bw, - R"doc(Performs backward operations for reciprocal on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - 
+----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for reciprocal on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::digamma_bw, R"doc(Performs backward operations for digamma on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_op( module, ttnn::erfinv_bw, - R"doc(Performs backward operations for erfinv on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for erfinv on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::erf_bw, - R"doc(Performs backward operations for erf on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ 
- | BFLOAT16 | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for erf on :attr:`input_tensor` with given :attr:`grad_tensor`)doc"); detail::bind_unary_backward_op( module, ttnn::deg2rad_bw, - R"doc(Performs backward operations for deg2rad on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", - R"doc(Supported dtypes, layouts, and ranks: - - +----------------------------+---------------------------------+-------------------+ - | Dtypes | Layouts | Ranks | - +----------------------------+---------------------------------+-------------------+ - | BFLOAT16, BFLOAT8_B | TILE | 2, 3, 4 | - +----------------------------+---------------------------------+-------------------+ - - )doc"); + R"doc(Performs backward operations for degree to radian conversion (deg2rad) on :attr:`input_tensor` with given :attr:`grad_tensor`)doc", + R"doc(BFLOAT16, BFLOAT8_B)doc"); detail::bind_unary_backward_float( module, ttnn::polygamma_bw, - R"doc(Performs backward operations for polygamma on :attr:`input_tensor` or :attr:`input_tensor_a`, :attr:`scalar` with given :attr:`grad_tensor`.)doc", + R"doc(Performs backward operations for polygamma on :attr:`input_tensor`, :attr:`scalar` with given :attr:`grad_tensor`.)doc", "n", "Order of polygamma function"); }