From e7e2bcd6e96b075bb3485db21d9aa3dbd1ba9508 Mon Sep 17 00:00:00 2001
From: Rohinish <92542124+rohinish404@users.noreply.github.com>
Date: Tue, 4 Jul 2023 18:24:08 +0530
Subject: [PATCH 1/3] equal (#17927)

---
 .../frontends/paddle/tensor/tensor.py  | 15 +++++++++
 .../test_tensor/test_paddle_tensor.py  | 33 +++++++++++++++++++
 2 files changed, 48 insertions(+)
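Reviewer note (not part of the patch): a minimal usage sketch of the new
`Tensor.equal` method. It assumes ivy is installed with at least one backend
available; `to_tensor` and `paddle_frontend.equal` already exist in this
frontend, so only the method call below is new.

    import ivy
    import ivy.functional.frontends.paddle as paddle_frontend

    # any installed backend should work here
    ivy.set_backend("numpy")

    x = paddle_frontend.to_tensor([1.0, 2.0, 3.0])
    y = paddle_frontend.to_tensor([1.0, 0.0, 3.0])

    # elementwise comparison; expected to yield [True, False, True]
    print(x.equal(y))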
diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
index af2a57b98a985..007526b4a5b81 100644
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -365,6 +365,21 @@ def cumsum(self, axis=None, dtype=None, name=None):
     def angle(self, name=None):
         return ivy.angle(self._ivy_array)
 
+    @with_unsupported_dtypes(
+        {
+            "2.5.0 and below": (
+                "uint8",
+                "int8",
+                "int16",
+                "complex64",
+                "complex128",
+            )
+        },
+        "paddle",
+    )
+    def equal(self, y, name=None):
+        return paddle_frontend.equal(self, y)
+
     @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
     def rad2deg(self, name=None):
         return ivy.rad2deg(self._ivy_array)
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
index ee2456266fd67..68a68a6c690f4 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py
@@ -1683,6 +1683,39 @@ def test_paddle_angle(
     )
 
 
+# equal
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="paddle.to_tensor",
+    method_name="equal",
+    dtypes_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
+        num_arrays=2,
+        shared_dtype=True,
+    ),
+)
+def test_paddle_equal(
+    dtypes_and_x,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    on_device,
+):
+    input_dtype, x = dtypes_and_x
+    helpers.test_frontend_method(
+        init_input_dtypes=input_dtype,
+        init_all_as_kwargs_np={"data": x[0]},
+        method_input_dtypes=input_dtype,
+        method_all_as_kwargs_np={"y": x[1]},
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
+
 # rad2deg
 @handle_frontend_method(
     class_tree=CLASS_TREE,

From 657f7df6ef27bac2db52081f7ec42dfe7d23add7 Mon Sep 17 00:00:00 2001
From: MahmoudAshraf97
Date: Tue, 4 Jul 2023 12:56:05 +0000
Subject: [PATCH 2/3] refactor `ivy.vector_norm`

* the old function delegated to the backends' native norm routines, which
  computed a matrix norm instead of a batched vector norm when the `axis`
  argument was passed as a tuple
---
 ivy/functional/backends/jax/linear_algebra.py | 26 +++++-----
 .../backends/numpy/linear_algebra.py          | 28 +++++------
 .../backends/paddle/linear_algebra.py         | 49 +++++++------------
 .../backends/tensorflow/linear_algebra.py     | 26 +++-------
 .../test_functional/test_core/test_linalg.py  | 10 ++--
 5 files changed, 59 insertions(+), 80 deletions(-)
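Reviewer note (not part of the patch): a minimal NumPy-only sketch of the
discrepancy described above. The shape and values are arbitrary; it only
illustrates why forwarding a tuple `axis` to a native norm routine was wrong.

    import numpy as np

    x = np.arange(24, dtype=np.float64).reshape(2, 3, 4)

    # what the old delegation computed for a tuple axis: a matrix norm
    # (the spectral norm when ord=2) over the trailing two axes
    matrix_norm = np.linalg.norm(x, ord=2, axis=(1, 2))

    # what ivy.vector_norm is specified to return: the elementwise 2-norm
    # of each 3x4 slice, which this patch now computes explicitly
    batched_vector_norm = np.sum(np.abs(x) ** 2, axis=(1, 2)) ** 0.5

    print(matrix_norm)          # largest singular value of each slice
    print(batched_vector_norm)  # Frobenius-style value; differs from above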
diff --git a/ivy/functional/backends/jax/linear_algebra.py b/ivy/functional/backends/jax/linear_algebra.py
index e223326c8729b..660f000464d94 100644
--- a/ivy/functional/backends/jax/linear_algebra.py
+++ b/ivy/functional/backends/jax/linear_algebra.py
@@ -422,21 +422,17 @@ def vector_norm(
 ) -> JaxArray:
     if dtype and x.dtype != dtype:
         x = x.astype(dtype)
-
-    ret_scalar = False
-    if x.ndim == 0:
-        x = jnp.expand_dims(x, 0)
-        ret_scalar = True
-
-    if axis is None:
-        x = x.reshape([-1])
-    elif isinstance(axis, list):
-        axis = tuple(axis)
-
-    jnp_normalized_vector = jnp.linalg.norm(x, ord, axis, keepdims)
-    if ret_scalar:
-        jnp_normalized_vector = jnp.squeeze(jnp_normalized_vector)
-    return jnp_normalized_vector
+    abs_x = jnp.abs(x)
+    if ord == 0:
+        return jnp.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims, out=out
+        )
+    elif ord == inf:
+        return jnp.max(abs_x, axis=axis, keepdims=keepdims, out=out)
+    elif ord == -inf:
+        return jnp.min(abs_x, axis=axis, keepdims=keepdims, out=out)
+    else:
+        return jnp.sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
 
 
 # Extra #
diff --git a/ivy/functional/backends/numpy/linear_algebra.py b/ivy/functional/backends/numpy/linear_algebra.py
index bfe0c65ac47e1..e1334a89012cc 100644
--- a/ivy/functional/backends/numpy/linear_algebra.py
+++ b/ivy/functional/backends/numpy/linear_algebra.py
@@ -378,21 +378,21 @@ def vector_norm(
 ) -> np.ndarray:
     if dtype and x.dtype != dtype:
         x = x.astype(dtype)
-
-    ret_scalar = False
-    if x.ndim == 0:
-        x = np.expand_dims(x, 0)
-        ret_scalar = True
-
-    if axis is None:
-        x = x.reshape([-1])
-    elif isinstance(axis, list):
+    abs_x = np.abs(x)
+    if isinstance(axis, list):
         axis = tuple(axis)
-
-    np_normalized_vector = np.linalg.norm(x, ord, axis, keepdims)
-    if ret_scalar:
-        np_normalized_vector = np.squeeze(np_normalized_vector)
-    return np_normalized_vector
+    if ord == 0:
+        return np.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims, out=out
+        )
+    elif ord == inf:
+        return np.max(abs_x, axis=axis, keepdims=keepdims, out=out)
+    elif ord == -inf:
+        return np.min(abs_x, axis=axis, keepdims=keepdims, out=out)
+    else:
+        return (
+            np.sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
+        ).astype(abs_x.dtype)
 
 
 # Extra #
diff --git a/ivy/functional/backends/paddle/linear_algebra.py b/ivy/functional/backends/paddle/linear_algebra.py
index 15f49c63c5522..9ebfc8f00acbd 100644
--- a/ivy/functional/backends/paddle/linear_algebra.py
+++ b/ivy/functional/backends/paddle/linear_algebra.py
@@ -591,37 +591,26 @@ def vector_norm(
     dtype: Optional[paddle.dtype] = None,
     out: Optional[paddle.Tensor] = None,
 ) -> paddle.Tensor:
-    ret_scalar = False
-    dtype = dtype if dtype is not None else x.dtype
-    if dtype in ["complex64", "complex128"]:
-        dtype = "float" + str(ivy.dtype_bits(dtype) // 2)
-    if x.ndim == 0:
-        x = paddle_backend.expand_dims(x, axis=0)
-        ret_scalar = True
-
-    if x.dtype in [
-        paddle.int8,
-        paddle.int16,
-        paddle.int32,
-        paddle.int64,
-        paddle.uint8,
-        paddle.float16,
-        paddle.complex64,
-        paddle.complex128,
-        paddle.bool,
-    ]:
-        if paddle.is_complex(x):
-            x = paddle.abs(x)
-            ret = paddle.norm(x, p=ord, axis=axis, keepdim=keepdims).astype(dtype)
-        else:
-            ret = paddle.norm(
-                x.cast("float32"), p=ord, axis=axis, keepdim=keepdims
-            ).astype(dtype)
+    if dtype and x.dtype != dtype:
+        x = x.astype(dtype)
+    abs_x = paddle_backend.abs(x)
+    if ord == 0:
+        return paddle_backend.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims
+        )
+    elif ord == inf:
+        return paddle_backend.max(abs_x, axis=axis, keepdims=keepdims)
+    elif ord == -inf:
+        return paddle_backend.min(abs_x, axis=axis, keepdims=keepdims)
     else:
-        ret = paddle.norm(x, p=ord, axis=axis, keepdim=keepdims).astype(dtype)
-    if ret_scalar or (x.ndim == 1 and not keepdims):
-        ret = paddle_backend.squeeze(ret, axis=axis)
-    return ret
+        return paddle_backend.pow(
+            paddle_backend.sum(
+                paddle_backend.pow(abs_x, ord),
+                axis=axis,
+                keepdims=keepdims,
+            ),
+            (1.0 / ord),
+        )
 
 
 # Extra #
diff --git a/ivy/functional/backends/tensorflow/linear_algebra.py b/ivy/functional/backends/tensorflow/linear_algebra.py
index 9d87364f692b4..edf7dd1bd8d7c 100644
--- a/ivy/functional/backends/tensorflow/linear_algebra.py
+++ b/ivy/functional/backends/tensorflow/linear_algebra.py
@@ -655,25 +655,15 @@ def vector_norm(
 ) -> Union[tf.Tensor, tf.Variable]:
     if dtype and x.dtype != dtype:
         x = tf.cast(x, dtype)
-    # Mathematical Norms
-    if ord > 0:
-        tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)
+    abs_x = tf.abs(x)
+    if ord == 0:
+        return tf.reduce_sum(tf.cast(x != 0, abs_x.dtype), axis=axis, keepdims=keepdims)
+    elif ord == inf:
+        return tf.reduce_max(abs_x, axis=axis, keepdims=keepdims)
+    elif ord == -inf:
+        return tf.reduce_min(abs_x, axis=axis, keepdims=keepdims)
     else:
-        if ord == -float("inf"):
-            tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)
-        elif ord == 0:
-            tn_normalized_vector = tf.reduce_sum(
-                tf.cast(x != 0, x.dtype), axis, keepdims
-            )
-        else:
-            tn_normalized_vector = tf.reduce_sum(tf.abs(x) ** ord, axis, keepdims) ** (
-                1.0 / ord
-            )
-    tn_normalized_vector = tf.cast(
-        tn_normalized_vector, tn_normalized_vector.dtype.real_dtype
-    )
-
-    return tn_normalized_vector
+        return tf.reduce_sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
 
 
 # Extra #
diff --git a/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py b/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py
index ab598e3c7eb7d..cbfe79b811043 100644
--- a/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py
+++ b/ivy_tests/test_ivy/test_functional/test_core/test_linalg.py
@@ -874,12 +874,12 @@ def test_vecdot(
         max_value=1e04,
         abs_smallest_val=1e-04,
         max_axes_size=2,
-        force_int_axis=True,
+        allow_neg_axes=True,
     ),
     kd=st.booleans(),
    ord=st.one_of(
-        helpers.ints(min_value=0, max_value=5),
-        helpers.floats(min_value=1.0, max_value=5.0),
+        helpers.ints(min_value=-5, max_value=5),
+        helpers.floats(min_value=-5, max_value=5.0),
         st.sampled_from((float("inf"), -float("inf"))),
     ),
     dtype=helpers.get_dtypes("numeric", full=False, none=True),
@@ -897,6 +897,10 @@ def test_vector_norm(
     ground_truth_backend,
 ):
     x_dtype, x, axis = dtype_values_axis
+    # normalize a one-element tuple axis to an int; force_int_axis was dropped
+    # above because it cannot generate an axis tuple with two axes
+    if isinstance(axis, tuple) and len(axis) == 1:
+        axis = axis[0]
     helpers.test_function(
         ground_truth_backend=ground_truth_backend,
         input_dtypes=x_dtype,

From 4568a184387007ae23874e32ebf3ea9cf566d106 Mon Sep 17 00:00:00 2001
From: XinyuanWang283 <68698255+XinyuanWang283@users.noreply.github.com>
Date: Tue, 4 Jul 2023 14:06:15 +0100
Subject: [PATCH 3/3] Implemented and tested Rot90 for Paddle frontends
 (#17708)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: 王新元
Co-authored-by: Rishab Mallick
Co-authored-by: DragosStoican <61854405+DragosStoican@users.noreply.github.com>
---
 .../frontends/paddle/tensor/tensor.py  |  7 +++
 .../test_tensor/test_paddle_tensor.py  | 44 +++++++++++++++++++
 2 files changed, 51 insertions(+)
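Reviewer note (not part of the patch): a minimal usage sketch of the new
`Tensor.rot90` method. It assumes ivy is installed with at least one backend
available; the method forwards to `ivy.rot90`, rotating in the plane spanned
by `axes`.

    import ivy
    import ivy.functional.frontends.paddle as paddle_frontend

    # any installed backend should work here
    ivy.set_backend("numpy")

    x = paddle_frontend.to_tensor([[1, 2], [3, 4]])

    print(x.rot90())                  # one 90-degree rotation in the (0, 1) plane
    print(x.rot90(k=2, axes=(0, 1)))  # two rotations, i.e. a 180-degree flip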
"int64", "bool")}, "paddle" + ) + def rot90(self, k=1, axes=(0, 1), name=None): + return ivy.rot90(self._ivy_array, k=k, axes=axes) + @with_supported_dtypes( {"2.5.0 and below": ("complex64", "complex128")}, "paddle", diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py index 68a68a6c690f4..da1a1cfbdd440 100644 --- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py +++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_paddle_tensor.py @@ -9,6 +9,9 @@ import ivy_tests.test_ivy.helpers as helpers from ivy.functional.frontends.paddle import Tensor from ivy_tests.test_ivy.helpers import handle_frontend_method +from ivy_tests.test_ivy.test_functional.test_experimental.test_core.test_manipulation import ( + _get_dtype_values_k_axes_for_rot90, +) CLASS_TREE = "ivy.functional.frontends.paddle.Tensor" @@ -1945,6 +1948,47 @@ def test_paddle_deg2rad( frontend=frontend, on_device=on_device, ) + + +# rot90 +@handle_frontend_method( + class_tree=CLASS_TREE, + init_tree="paddle.to_tensor", + method_name="rot90", + dtype_m_k_axes=_get_dtype_values_k_axes_for_rot90( + available_dtypes=helpers.get_dtypes("valid"), + min_num_dims=3, + max_num_dims=6, + min_dim_size=1, + max_dim_size=10, + ), +) +def test_paddle_rot90( + dtype_m_k_axes, + frontend_method_data, + init_flags, + method_flags, + frontend, + on_device, +): + input_dtype, values, k, axes = dtype_m_k_axes + + helpers.test_frontend_method( + init_input_dtypes=input_dtype, + init_all_as_kwargs_np={ + "data": values, + }, + method_input_dtypes=input_dtype, + method_all_as_kwargs_np={ + "k": k, + "axes": axes, + }, + frontend_method_data=frontend_method_data, + init_flags=init_flags, + method_flags=method_flags, + frontend=frontend, + on_device=on_device, + ) # imag