diff --git a/ivy/functional/frontends/paddle/math.py b/ivy/functional/frontends/paddle/math.py index da18aa0ad3245..1613b2460df13 100644 --- a/ivy/functional/frontends/paddle/math.py +++ b/ivy/functional/frontends/paddle/math.py @@ -271,6 +271,18 @@ def heaviside(x, y, name=None): return ivy.heaviside(x, y) +@with_supported_dtypes( + {"2.5.1 and below": ("int32", "int64", "float32", "float64")}, "paddle" +) +@to_ivy_arrays_and_back +def increment(x, value=1.0, name=None): + if ( + ivy.prod(ivy.shape(x)) != 1 + ): # TODO this function will be simplified as soon as ivy.increment is add + raise ValueError("The input tensor x must contain only one element.") + return ivy.add(x, value) + + @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle") @to_ivy_arrays_and_back def inner(x, y, name=None): @@ -338,6 +350,12 @@ def log(x, name=None): return ivy.log(x) +@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def log10(x, name=None): + return ivy.log10(x) + + @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle") @to_ivy_arrays_and_back def log1p(x, name=None): @@ -349,11 +367,6 @@ def log1p(x, name=None): def log2(x, name=None): return ivy.log2(x) -@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle") -@to_ivy_arrays_and_back -def log10(x, name=None): - return ivy.log10(x) - @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py index 3661cdaf67d6d..7bfbffe30ec6e 100644 --- a/ivy/functional/frontends/paddle/tensor/tensor.py +++ b/ivy/functional/frontends/paddle/tensor/tensor.py @@ -798,3 +798,9 @@ def real(self, name=None): ) def cast(self, dtype): return paddle_frontend.cast(self, dtype) + + @with_supported_dtypes( + {"2.5.1 and below": ("int32", "int64", "float32", "float64")}, "paddle" + 
) + def increment(self, value=1.0, name=None): + return paddle_frontend.increment(self, value) diff --git a/ivy/functional/frontends/torch/comparison_ops.py b/ivy/functional/frontends/torch/comparison_ops.py index 0481f00ddca57..4a52f22da10a6 100644 --- a/ivy/functional/frontends/torch/comparison_ops.py +++ b/ivy/functional/frontends/torch/comparison_ops.py @@ -292,7 +292,7 @@ def topk(input, k, dim=None, largest=True, sorted=True, *, out=None): gt = greater +ne = not_equal ge = greater_equal le = less_equal lt = less -ne = not_equal diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py index 096f73f9f7974..eba12accf1a76 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_math.py @@ -1,4 +1,5 @@ # global +from typing import Literal from hypothesis import strategies as st # local @@ -45,7 +46,7 @@ def test_paddle_abs( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -73,7 +74,7 @@ def test_paddle_acos( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -102,7 +103,7 @@ def test_paddle_acosh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -137,7 +138,7 @@ def test_paddle_add( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -180,7 +181,7 @@ def test_paddle_addmm( alpha, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -216,7 +217,7 @@ def test_paddle_amax( on_device, fn_tree, backend_fw, - frontend, + frontend: Literal["paddle"], test_flags, ): input_dtype, x = dtype_and_x @@ -247,7 +248,7 @@ def test_paddle_amin( on_device, fn_tree, backend_fw, - frontend, + frontend: Literal["paddle"], test_flags, ): input_dtype, x, axis = dtype_and_x @@ -275,7 
+276,7 @@ def test_paddle_angle( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -307,7 +308,7 @@ def test_paddle_any( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -335,7 +336,7 @@ def test_paddle_any( def test_paddle_asin( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, backend_fw, @@ -365,7 +366,7 @@ def test_paddle_asinh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -392,7 +393,7 @@ def test_paddle_asinh( def test_paddle_atan( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, backend_fw, @@ -428,7 +429,7 @@ def test_paddle_atan2( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -457,7 +458,7 @@ def test_paddle_atanh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -483,7 +484,7 @@ def test_paddle_atanh( def test_paddle_ceil( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, backend_fw, @@ -511,7 +512,7 @@ def test_paddle_ceil( def test_paddle_conj( *, dtype_and_input, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -541,7 +542,7 @@ def test_paddle_cos( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -569,7 +570,7 @@ def test_paddle_cosh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -601,7 +602,7 @@ def test_paddle_count_nonzero( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -635,7 +636,7 @@ def test_paddle_cumprod( dtype_x_axis, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -664,7 +665,7 @@ def test_paddle_deg2rad( dtype_and_x, 
on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -708,7 +709,7 @@ def test_paddle_diff( dtype_prepend, dtype_append, test_flags, - frontend, + frontend: Literal["paddle"], backend_fw, fn_tree, on_device, @@ -744,7 +745,7 @@ def test_paddle_digamma( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -779,7 +780,7 @@ def test_paddle_divide( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -806,7 +807,7 @@ def test_paddle_divide( def test_paddle_erf( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -836,7 +837,7 @@ def test_paddle_exp( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -864,7 +865,7 @@ def test_paddle_expm1( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -890,7 +891,7 @@ def test_paddle_expm1( def test_paddle_floor( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -925,7 +926,7 @@ def test_paddle_floor_divide( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -962,7 +963,7 @@ def test_paddle_floor_mod( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -990,7 +991,7 @@ def test_paddle_fmax( dtypes_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1018,7 +1019,7 @@ def test_paddle_fmin( dtypes_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1048,7 +1049,7 @@ def test_paddle_fmin( def test_paddle_frac( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -1084,7 +1085,7 @@ def test_paddle_gcd( dtype_and_x, on_device, fn_tree, - frontend, + 
frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1119,7 +1120,7 @@ def test_paddle_heaviside( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1136,6 +1137,40 @@ def test_paddle_heaviside( ) +@handle_frontend_test( + fn_tree="paddle.increment", + dtype_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("float"), + num_arrays=1, + shape=(1,), + allow_inf=False, + large_abs_safety_factor=2, + small_abs_safety_factor=2, + safety_factor_scale="log", + shared_dtype=True, + ), +) +def test_paddle_increment( + *, + dtype_and_x, + on_device, + fn_tree, + frontend: Literal["paddle"], + test_flags, + backend_fw, +): + input_dtype, x = dtype_and_x + helpers.test_frontend_function( + input_dtypes=input_dtype, + backend_to_test=backend_fw, + frontend=frontend, + fn_tree=fn_tree, + test_flags=test_flags, + on_device=on_device, + x=x[0], + ) + + # inner @handle_frontend_test( fn_tree="paddle.inner", @@ -1152,7 +1187,7 @@ def test_paddle_inner( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1179,7 +1214,7 @@ def test_paddle_inner( def test_paddle_isfinite( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -1207,7 +1242,7 @@ def test_paddle_isfinite( def test_paddle_isinf( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -1235,7 +1270,7 @@ def test_paddle_isinf( def test_paddle_isnan( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, fn_tree, @@ -1269,7 +1304,7 @@ def test_paddle_kron( on_device, fn_tree, backend_fw, - frontend, + frontend: Literal["paddle"], test_flags, ): input_dtype, x = dtype_and_x @@ -1302,7 +1337,7 @@ def test_paddle_lcm( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1337,7 +1372,7 @@ def test_paddle_lerp( dtype_and_x, on_device, 
fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1368,7 +1403,7 @@ def test_paddle_lgamma( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1397,7 +1432,7 @@ def test_paddle_log( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -1413,20 +1448,18 @@ def test_paddle_log( ) -# log1p @handle_frontend_test( - fn_tree="paddle.log1p", + fn_tree="paddle.log10", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), - max_value=1e5, ), ) -def test_paddle_log1p( +def test_paddle_log10( *, dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1442,19 +1475,20 @@ def test_paddle_log1p( ) -# log2 +# log1p @handle_frontend_test( - fn_tree="paddle.log2", + fn_tree="paddle.log1p", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), + max_value=1e5, ), ) -def test_paddle_log2( +def test_paddle_log1p( *, dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1469,18 +1503,20 @@ def test_paddle_log2( x=x[0], ) + +# log2 @handle_frontend_test( - fn_tree="paddle.log10", + fn_tree="paddle.log2", dtype_and_x=helpers.dtype_and_values( available_dtypes=helpers.get_dtypes("valid"), ), ) -def test_paddle_log10( +def test_paddle_log2( *, dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1508,7 +1544,7 @@ def test_paddle_logit( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1541,7 +1577,7 @@ def test_paddle_max( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1573,7 +1609,7 @@ def test_paddle_maximum( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ 
-1606,7 +1642,7 @@ def test_paddle_min( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1637,7 +1673,7 @@ def test_paddle_minimum( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1664,7 +1700,7 @@ def test_paddle_mm( dtype_xy, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -1699,7 +1735,7 @@ def test_paddle_multiply( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -1729,7 +1765,7 @@ def test_paddle_nanmean( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1763,7 +1799,7 @@ def test_paddle_nansum( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1794,7 +1830,7 @@ def test_paddle_neg( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1826,7 +1862,7 @@ def test_paddle_outer( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1858,7 +1894,7 @@ def test_paddle_pow( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -1895,7 +1931,7 @@ def test_paddle_prod( on_device, backend_fw, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, ): input_dtype, x, axis = dtype_and_x @@ -1924,7 +1960,7 @@ def test_paddle_rad2deg( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1952,7 +1988,7 @@ def test_paddle_reciprocal( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -1986,7 +2022,7 @@ def test_paddle_remainder( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -2014,7 +2050,7 @@ def test_paddle_remainder( 
def test_paddle_round( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, backend_fw, @@ -2042,7 +2078,7 @@ def test_paddle_round( def test_paddle_rsqrt( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, on_device, @@ -2078,7 +2114,7 @@ def test_paddle_sgn( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -2106,7 +2142,7 @@ def test_paddle_sign( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -2134,7 +2170,7 @@ def test_paddle_sin( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2163,7 +2199,7 @@ def test_paddle_sinh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2189,7 +2225,7 @@ def test_paddle_sinh( def test_paddle_sqrt( *, dtype_and_x, - frontend, + frontend: Literal["paddle"], test_flags, fn_tree, backend_fw, @@ -2219,7 +2255,7 @@ def test_paddle_square( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -2249,7 +2285,7 @@ def test_paddle_stanh( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, scale_a, @@ -2287,7 +2323,7 @@ def test_paddle_subtract( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2321,7 +2357,7 @@ def test_paddle_sum( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2347,7 +2383,7 @@ def test_paddle_take( on_device, fn_tree, backend_fw, - frontend, + frontend: Literal["paddle"], test_flags, ): dtypes, xs, indices, modes = dtype_and_values @@ -2376,7 +2412,7 @@ def test_paddle_tan( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): @@ -2405,7 +2441,7 @@ def test_paddle_tanh( 
dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2445,7 +2481,7 @@ def test_paddle_trace( axis2, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], test_flags, backend_fw, ): @@ -2476,7 +2512,7 @@ def test_paddle_trunc( dtype_and_x, on_device, fn_tree, - frontend, + frontend: Literal["paddle"], backend_fw, test_flags, ): diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py index 255de088250a0..d471ff4cd2dec 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py @@ -2175,6 +2175,43 @@ def test_paddle_tensor_imag( ) +@handle_frontend_method( + class_tree=CLASS_TREE, # the class tree from your provided context + init_tree="paddle.to_tensor", + method_name="increment", + dtypes_and_x=helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes( + "float" + ), # as float32 and float64 are mentioned as supported + num_arrays=1, + shape=(1,), + ), +) +def test_paddle_tensor_increment( + dtypes_and_x, + frontend_method_data, + init_flags, + method_flags, + frontend, + on_device, + backend_fw, +): + input_dtype, x = dtypes_and_x + value = 5.0 # example value to increment + helpers.test_frontend_method( + init_input_dtypes=input_dtype, + backend_to_test=backend_fw, + init_all_as_kwargs_np={"data": x[0]}, + method_input_dtypes=input_dtype, + method_all_as_kwargs_np={"value": value}, + frontend_method_data=frontend_method_data, + init_flags=init_flags, + method_flags=method_flags, + frontend=frontend, + on_device=on_device, + ) + + # inner @handle_frontend_method( class_tree=CLASS_TREE, diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py index 8fcae08acbf55..6956ffa3f2a68 100644 --- 
a/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py +++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_utilities.py @@ -6,6 +6,50 @@ from ivy_tests.test_ivy.helpers import handle_frontend_test +# --- Helpers --- # +# --------------- # + + +@st.composite +def _elemwise_helper(draw): + value_strategy = st.one_of( + helpers.dtype_and_values( + available_dtypes=helpers.get_dtypes("valid"), + ), + st.integers(min_value=-10000, max_value=10000), + st.floats(min_value=-10000, max_value=10000), + ) + + dtype_and_x1 = draw(value_strategy) + if isinstance(dtype_and_x1, tuple): + dtype1 = dtype_and_x1[0] + x1 = dtype_and_x1[1][0] + else: + dtype1 = [] + x1 = dtype_and_x1 + + dtype_and_x2 = draw(value_strategy) + if isinstance(dtype_and_x2, tuple): + dtype2 = dtype_and_x2[0] + x2 = dtype_and_x2[1][0] + else: + dtype2 = [] + x2 = dtype_and_x2 + + num_pos_args = None + if not dtype1 and not dtype2: + num_pos_args = 2 + elif not dtype1: + x1, x2 = x2, x1 + input_dtypes = dtype1 + dtype2 + + return x1, x2, input_dtypes, num_pos_args + + +# --- Main --- # +# ------------ # + + # ToDo: Fix this test after torch overide of assert is implemented # @handle_frontend_test( # fn_tree="torch._assert", @@ -74,42 +118,6 @@ def test_torch_bincount( ) -@st.composite -def _elemwise_helper(draw): - value_strategy = st.one_of( - helpers.dtype_and_values( - available_dtypes=helpers.get_dtypes("valid"), - ), - st.integers(min_value=-10000, max_value=10000), - st.floats(min_value=-10000, max_value=10000), - ) - - dtype_and_x1 = draw(value_strategy) - if isinstance(dtype_and_x1, tuple): - dtype1 = dtype_and_x1[0] - x1 = dtype_and_x1[1][0] - else: - dtype1 = [] - x1 = dtype_and_x1 - - dtype_and_x2 = draw(value_strategy) - if isinstance(dtype_and_x2, tuple): - dtype2 = dtype_and_x2[0] - x2 = dtype_and_x2[1][0] - else: - dtype2 = [] - x2 = dtype_and_x2 - - num_pos_args = None - if not dtype1 and not dtype2: - num_pos_args = 2 - elif not dtype1: - x1, x2 = x2, x1 - input_dtypes = dtype1 
+ dtype2 - - return x1, x2, input_dtypes, num_pos_args - - @handle_frontend_test( fn_tree="torch.result_type", dtypes_and_xs=_elemwise_helper(),