diff --git a/ivy/functional/backends/jax/linear_algebra.py b/ivy/functional/backends/jax/linear_algebra.py
index d81ff499e81df..f9e4ee97dc1cf 100644
--- a/ivy/functional/backends/jax/linear_algebra.py
+++ b/ivy/functional/backends/jax/linear_algebra.py
@@ -361,7 +361,7 @@ def svd(
     x: JaxArray, /, *, compute_uv: bool = True, full_matrices: bool = True
 ) -> Union[JaxArray, Tuple[JaxArray, ...]]:
     if compute_uv:
-        results = namedtuple("svd", "U S Vh")
+        results = namedtuple("svd", 'U S Vh')
         U, D, VT = jnp.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
         return results(U, D, VT)
     else:
diff --git a/ivy/functional/backends/numpy/linear_algebra.py b/ivy/functional/backends/numpy/linear_algebra.py
index 20424a1da7ec5..d580a469f4f66 100644
--- a/ivy/functional/backends/numpy/linear_algebra.py
+++ b/ivy/functional/backends/numpy/linear_algebra.py
@@ -291,7 +291,7 @@ def svd(
     x: np.ndarray, /, *, compute_uv: bool = True, full_matrices: bool = True
 ) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
     if compute_uv:
-        results = namedtuple("svd", "U S Vh")
+        results = namedtuple("svd", 'U S Vh')
         U, D, VT = np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)
         return results(U, D, VT)
     else:
diff --git a/ivy/functional/backends/paddle/linear_algebra.py b/ivy/functional/backends/paddle/linear_algebra.py
index 1a037d570f32a..5bbefe31838a9 100644
--- a/ivy/functional/backends/paddle/linear_algebra.py
+++ b/ivy/functional/backends/paddle/linear_algebra.py
@@ -521,7 +521,7 @@ def svd(
 ) -> Union[paddle.Tensor, Tuple[paddle.Tensor, ...]]:
     ret = paddle.linalg.svd(x, full_matrices=full_matrices)
     if compute_uv:
-        results = namedtuple("svd", "U S Vh")
+        results = namedtuple("svd", 'U S Vh')
         return results(*ret)
     else:
         results = namedtuple("svd", "S")
diff --git a/ivy/functional/backends/tensorflow/linear_algebra.py b/ivy/functional/backends/tensorflow/linear_algebra.py
index 0fcaa349572e4..ef2f56e440dc6 100644
--- a/ivy/functional/backends/tensorflow/linear_algebra.py
+++ b/ivy/functional/backends/tensorflow/linear_algebra.py
@@ -534,8 +534,7 @@ def svd(
     compute_uv: bool = True,
 ) -> Union[Union[tf.Tensor, tf.Variable], Tuple[Union[tf.Tensor, tf.Variable], ...]]:
     if compute_uv:
-        results = namedtuple("svd", "U S Vh")
-
+        results = namedtuple("svd", 'U S Vh')
         batch_shape = tf.shape(x)[:-2]
         num_batch_dims = len(batch_shape)
         transpose_dims = list(range(num_batch_dims)) + [
diff --git a/ivy/functional/backends/torch/linear_algebra.py b/ivy/functional/backends/torch/linear_algebra.py
index e8d960d313b37..26deaf8d25f66 100644
--- a/ivy/functional/backends/torch/linear_algebra.py
+++ b/ivy/functional/backends/torch/linear_algebra.py
@@ -414,16 +414,13 @@ def svd(
     x: torch.Tensor, /, *, full_matrices: bool = True, compute_uv: bool = True
 ) -> Union[torch.Tensor, Tuple[torch.Tensor, ...]]:
     if compute_uv:
-        results = namedtuple("svd", "U S Vh")
-
+        results = namedtuple("svd", 'U S Vh')
         U, D, VT = torch.linalg.svd(x, full_matrices=full_matrices)
         return results(U, D, VT)
     else:
         results = namedtuple("svd", "S")
-        svd = torch.linalg.svd(x, full_matrices=full_matrices)
-        # torch.linalg.svd returns a tuple with U, S, and Vh
-        D = svd[1]
-        return results(D)
+        s = torch.linalg.svdvals(x)
+        return results(s)


 @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, backend_version)
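Taken together, the backend hunks above converge on a single return contract for `svd`: a namedtuple `(U, S, Vh)` when `compute_uv=True`, and a namedtuple `(S)` otherwise, with the torch backend now delegating the values-only path to `torch.linalg.svdvals` rather than computing and discarding the singular vectors. A minimal sketch of that contract, in plain NumPy purely for illustration (not the Ivy source itself):

```python
from collections import namedtuple

import numpy as np


def svd(x, /, *, compute_uv=True, full_matrices=True):
    # compute_uv=True  -> namedtuple("svd", "U S Vh")
    # compute_uv=False -> namedtuple("svd", "S"), the cheaper values-only path
    if compute_uv:
        results = namedtuple("svd", "U S Vh")
        U, S, Vh = np.linalg.svd(x, full_matrices=full_matrices)
        return results(U, S, Vh)
    results = namedtuple("svd", "S")
    # mirrors the torch backend's new torch.linalg.svdvals(x) call
    return results(np.linalg.svd(x, compute_uv=False))
```
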
diff --git a/ivy/functional/frontends/jax/lax/linalg.py b/ivy/functional/frontends/jax/lax/linalg.py
index a48964c859054..c9568fbfdf214 100644
--- a/ivy/functional/frontends/jax/lax/linalg.py
+++ b/ivy/functional/frontends/jax/lax/linalg.py
@@ -1,6 +1,6 @@
 import ivy
 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes


 @to_ivy_arrays_and_back
@@ -44,7 +44,29 @@ def qr(x, /, *, full_matrices=False):


 @to_ivy_arrays_and_back
-def svd(x, /, *, full_matrices=True, compute_uv=True):
-    if not compute_uv:
-        return ivy.svdvals(x)
-    return ivy.svd(x, full_matrices=full_matrices)
+@with_supported_dtypes(
+    {
+        "0.4.14 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "jax",
+)
+def svd(x, /, *, full_matrices=True, compute_uv=True, subset_by_index=None):
+    # TODO: handle subset_by_index
+    if ivy.is_complex_dtype(x.dtype):
+        d = ivy.complex128
+    else:
+        d = ivy.float64
+    if compute_uv:
+        svd = ivy.svd(x, compute_uv=compute_uv, full_matrices=full_matrices)
+        return tuple(
+            [ivy.astype(svd.U, d), ivy.astype(svd.S, d), ivy.astype(svd.Vh, d)]
+        )
+    else:
+        return ivy.astype(ivy.svdvals(x), ivy.float64)
diff --git a/ivy/functional/frontends/jax/numpy/linalg.py b/ivy/functional/frontends/jax/numpy/linalg.py
index 57c69ee5b1596..1265b6648246c 100644
--- a/ivy/functional/frontends/jax/numpy/linalg.py
+++ b/ivy/functional/frontends/jax/numpy/linalg.py
@@ -120,10 +120,32 @@ def solve(a, b):


 @to_ivy_arrays_and_back
+@with_supported_dtypes(
+    {
+        "0.4.24 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "jax",
+)
 def svd(a, /, *, full_matrices=True, compute_uv=True, hermitian=None):
-    if not compute_uv:
-        return ivy.svdvals(a)
-    return ivy.svd(a, full_matrices=full_matrices)
+    # TODO: handle hermitian
+    if ivy.is_complex_dtype(a.dtype):
+        d = ivy.complex128
+    else:
+        d = ivy.float64
+    if compute_uv:
+        svd = ivy.svd(a, compute_uv=compute_uv, full_matrices=full_matrices)
+        return tuple(
+            [ivy.astype(svd.U, d), ivy.astype(svd.S, d), ivy.astype(svd.Vh, d)]
+        )
+    else:
+        return ivy.astype(ivy.svdvals(a), ivy.float64)


 @to_ivy_arrays_and_back
diff --git a/ivy/functional/frontends/numpy/linalg/decompositions.py b/ivy/functional/frontends/numpy/linalg/decompositions.py
index 0452e9c19a271..7147c7d1e6679 100644
--- a/ivy/functional/frontends/numpy/linalg/decompositions.py
+++ b/ivy/functional/frontends/numpy/linalg/decompositions.py
@@ -1,6 +1,7 @@
 # local
 import ivy
 from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
+from ivy.func_wrapper import with_supported_dtypes


 @to_ivy_arrays_and_back
@@ -14,6 +15,22 @@ def qr(a, mode="reduced"):


 @to_ivy_arrays_and_back
+@with_supported_dtypes(
+    {
+        "1.26.3 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "numpy",
+)
 def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
-    # Todo: conpute_uv and hermitian handling
-    return ivy.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
+    # Todo: hermitian handling
+    if compute_uv:
+        return ivy.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
+    else:
+        return ivy.astype(ivy.svdvals(a), a.dtype)
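Both jax frontends (and, for the values-only path, the numpy frontend) now pin the output dtype rather than inheriting whatever the backend produces: real inputs are cast to `float64`, complex inputs to `complex128`, while `numpy.linalg.svd` keeps `a.dtype` in its `compute_uv=False` branch. A rough sketch of the casting rule, assuming a NumPy backend purely for demonstration:

```python
import ivy

ivy.set_backend("numpy")

a = ivy.array([[2.0, 1.0], [1.0, 3.0]], dtype=ivy.float32)
# the rule the frontends above apply to every returned factor
d = ivy.complex128 if ivy.is_complex_dtype(a.dtype) else ivy.float64
res = ivy.svd(a, compute_uv=True, full_matrices=True)
U, S, Vh = (ivy.astype(t, d) for t in (res.U, res.S, res.Vh))
assert S.dtype == ivy.float64  # real input -> float64; complex -> complex128
```
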
diff --git a/ivy/functional/frontends/tensorflow/linalg.py b/ivy/functional/frontends/tensorflow/linalg.py
index 01a8f293f4497..7d80146b349b3 100644
--- a/ivy/functional/frontends/tensorflow/linalg.py
+++ b/ivy/functional/frontends/tensorflow/linalg.py
@@ -357,8 +357,32 @@ def solve(matrix, rhs, /, *, adjoint=False, name=None):


 @to_ivy_arrays_and_back
+@with_supported_dtypes(
+    {
+        "2.15.0 and below": (
+            "float32",
+            "float64",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "tensorflow",
+)
 def svd(a, /, *, full_matrices=False, compute_uv=True, name=None):
-    return ivy.svd(a, compute_uv=compute_uv, full_matrices=full_matrices)
+    if ivy.is_complex_dtype(a.dtype):
+        d = ivy.complex128
+    else:
+        d = ivy.float64
+    if compute_uv:
+        svd = ivy.svd(a, compute_uv=compute_uv, full_matrices=full_matrices)
+        return tuple(
+            [ivy.astype(svd.S, d), ivy.astype(svd.U, d), ivy.astype(svd.Vh.T, d)]
+        )
+    else:
+        svd = ivy.svd(a, compute_uv=compute_uv, full_matrices=full_matrices)
+        return ivy.astype(svd.S, d)


 @to_ivy_arrays_and_back
diff --git a/ivy/functional/frontends/tensorflow/raw_ops.py b/ivy/functional/frontends/tensorflow/raw_ops.py
index 61c87dcb5c76b..38ffbcd15b160 100644
--- a/ivy/functional/frontends/tensorflow/raw_ops.py
+++ b/ivy/functional/frontends/tensorflow/raw_ops.py
@@ -832,12 +832,24 @@ def Sum(*, input, axis, keep_dims=False, name="Sum"):


 @with_supported_dtypes(
-    {"2.15.0 and below": ("float64", "float128", "halfcomplex64", "complex128")},
+    {
+        "2.15.0 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
     "tensorflow",
 )
 @to_ivy_arrays_and_back
-def Svd(*, input, full_matrices=False, compute_uv=True, name=None):
-    return ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)
+def Svd(*, input, full_matrices=False, compute_uv=True, name="Svd"):
+    ret = ivy.svd(input, compute_uv=compute_uv, full_matrices=full_matrices)
+    if not compute_uv:
+        return (ret.S, None, None)
+    return (ret.S, ret.U, ivy.adjoint(ret.Vh))


 @to_ivy_arrays_and_back
diff --git a/ivy/functional/frontends/torch/blas_and_lapack_ops.py b/ivy/functional/frontends/torch/blas_and_lapack_ops.py
index a1c8cd9bbe77b..c0f6f27bd584b 100644
--- a/ivy/functional/frontends/torch/blas_and_lapack_ops.py
+++ b/ivy/functional/frontends/torch/blas_and_lapack_ops.py
@@ -1,7 +1,8 @@
 # global
 import ivy
-from ivy.func_wrapper import with_unsupported_dtypes
+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
 import ivy.functional.frontends.torch as torch_frontend
+from collections import namedtuple
 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back


@@ -189,13 +190,36 @@ def slogdet(A, *, out=None):
     return torch_frontend.linalg.slogdet(A, out=out)


+@with_supported_dtypes(
+    {
+        "2.2 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "torch",
+)
 @to_ivy_arrays_and_back
 def svd(input, some=True, compute_uv=True, *, out=None):
-    # TODO: add compute_uv
-    if some:
-        ret = ivy.svd(input, full_matrices=False)
+    retu = ivy.svd(input, full_matrices=not some, compute_uv=compute_uv)
+    results = namedtuple("svd", "U S V")
+    if compute_uv:
+        ret = results(retu[0], retu[1], ivy.adjoint(retu[2]))
     else:
-        ret = ivy.svd(input, full_matrices=True)
+        shape = list(input.shape)
+        shape1 = shape.copy()
+        shape2 = shape.copy()
+        shape1[-2] = shape[-1]
+        shape2[-1] = shape[-2]
+        ret = results(
+            ivy.zeros(shape1, device=input.device, dtype=input.dtype),
+            ivy.astype(retu[0], input.dtype),
+            ivy.zeros(shape2, device=input.device, dtype=input.dtype),
+        )
     if ivy.exists(out):
         return ivy.inplace_update(out, ret)
     return ret
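The three frontends above each reconcile a different native return convention: `tf.linalg.svd` yields `(s, u, v)`, `torch.svd` yields `(U, S, V)`, and Ivy's `svd` yields `(U, S, Vh)`, which is why the wrappers reorder the factors and take adjoints. A small NumPy check of the `V` versus `Vh` relationship (illustrative only):

```python
import numpy as np

a = np.random.default_rng(0).standard_normal((4, 4))
U, S, Vh = np.linalg.svd(a)  # Ivy/NumPy convention: rows of Vh
V = Vh.conj().T              # tf.linalg.svd / torch.svd convention: columns of V
# both conventions reconstruct the same matrix
np.testing.assert_allclose(U @ np.diag(S) @ V.conj().T, a, atol=1e-10)
```
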
diff --git a/ivy/functional/frontends/torch/linalg.py b/ivy/functional/frontends/torch/linalg.py
index 0174959d032b6..ad951346831d7 100644
--- a/ivy/functional/frontends/torch/linalg.py
+++ b/ivy/functional/frontends/torch/linalg.py
@@ -347,11 +347,30 @@ def solve_ex(A, B, *, left=True, check_errors=False, out=None):

 @to_ivy_arrays_and_back
 @with_supported_dtypes(
-    {"2.2 and below": ("float32", "float64", "complex32", "complex64")}, "torch"
+    {
+        "2.2 and below": (
+            "float64",
+            "float32",
+            "half",
+            "complex32",
+            "complex64",
+            "complex128",
+        )
+    },
+    "torch",
 )
 def svd(A, /, *, full_matrices=True, driver=None, out=None):
-    # TODO: add handling for driver and out
-    return ivy.svd(A, compute_uv=True, full_matrices=full_matrices)
+    # TODO: add handling for driver
+    USVh = ivy.svd(A, compute_uv=True, full_matrices=full_matrices)
+    if ivy.is_complex_dtype(A.dtype):
+        d = ivy.complex64
+    else:
+        d = ivy.float32
+    nt = namedtuple("svd", "U S Vh")
+    ret = nt(ivy.astype(USVh.U, d), ivy.astype(USVh.S, d), ivy.astype(USVh.Vh, d))
+    if ivy.exists(out):
+        return ivy.inplace_update(out, ret)
+    return ret


 @to_ivy_arrays_and_back
diff --git a/ivy/functional/frontends/torch/tensor.py b/ivy/functional/frontends/torch/tensor.py
index ef09a2942b1db..0e74da12de752 100644
--- a/ivy/functional/frontends/torch/tensor.py
+++ b/ivy/functional/frontends/torch/tensor.py
@@ -2109,7 +2109,19 @@ def adjoint(self):
     def conj(self):
         return torch_frontend.conj(self)

-    @with_unsupported_dtypes({"2.2 and below": ("float16", "bfloat16")}, "torch")
+    @with_supported_dtypes(
+        {
+            "2.2 and below": (
+                "float64",
+                "float32",
+                "half",
+                "complex32",
+                "complex64",
+                "complex128",
+            )
+        },
+        "torch",
+    )
     def svd(self, some=True, compute_uv=True, *, out=None):
         return torch_frontend.svd(self, some=some, compute_uv=compute_uv, out=out)

diff --git a/ivy/functional/ivy/linear_algebra.py b/ivy/functional/ivy/linear_algebra.py
index 4c61f9fdbbaa0..1c9febe327ad5 100644
--- a/ivy/functional/ivy/linear_algebra.py
+++ b/ivy/functional/ivy/linear_algebra.py
@@ -2130,15 +2130,12 @@ def svd(
         If ``True`` then left and right singular vectors will be computed and
         returned in ``U`` and ``Vh``, respectively. Otherwise, only the singular
         values will be computed, which can be significantly faster.
-    .. note::
-        with backend set as torch, svd with still compute left and right singular
-        vectors irrespective of the value of compute_uv, however Ivy will still
-        only return the singular values.

     Returns
     -------
     .. note::
         once complex numbers are supported, each square matrix must be Hermitian.
+        In addition, the return will be a namedtuple ``(S)`` when ``compute_uv`` is ``False``.

     ret
         a namedtuple ``(U, S, Vh)`` whose
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_linalg.py
index 789512cb08421..dae44990b5a34 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_lax/test_linalg.py
@@ -159,39 +159,41 @@ def test_jax_qr(


 # svd
+# TODO: implement proper drawing of index parameter, implement subset_by_index,
+# and resolve the ground truth's significant inaccuracy
 @handle_frontend_test(
     fn_tree="jax.lax.linalg.svd",
-    dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
+    dtype_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
         min_value=0,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
-    ).filter(
-        lambda x: "float16" not in x[0]
-        and "bfloat16" not in x[0]
-        and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
-        and np.linalg.det(np.asarray(x[1][0])) != 0
     ),
     full_matrices=st.booleans(),
     compute_uv=st.booleans(),
+    index=st.one_of(
+        st.none()
+        # , st.tuples(st.integers(min_value=0, max_value=3),
+        #             st.integers(min_value=3, max_value=5))
+    ),
     test_with_out=st.just(False),
 )
 def test_jax_svd(
     *,
-    dtype_and_x,
+    dtype_x,
     full_matrices,
     compute_uv,
+    index,
     on_device,
     fn_tree,
     frontend,
     test_flags,
     backend_fw,
 ):
-    dtype, x = dtype_and_x
+    dtype, x = dtype_x
     x = np.asarray(x[0], dtype=dtype[0])
-    # make symmetric positive-definite beforehand
+    # make symmetric positive-definite
     x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
-
     ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
         backend_to_test=backend_fw,
@@ -203,32 +205,40 @@ def test_jax_svd(
         x=x,
         full_matrices=full_matrices,
         compute_uv=compute_uv,
+        subset_by_index=index,
     )
-
-    if compute_uv:
-        with BackendHandler.update_backend(backend_fw) as ivy_backend:
-            ret = [ivy_backend.to_numpy(x) for x in ret]
-        frontend_ret = [np.asarray(x) for x in frontend_ret]
-
-        u, s, vh = ret
-        frontend_u, frontend_s, frontend_vh = frontend_ret
-
+    if not compute_uv:
+        if backend_fw == "torch":
+            ret = ret.detach()
         assert_all_close(
-            ret_np=u @ np.diag(s) @ vh,
-            ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
-            rtol=1e-2,
-            atol=1e-2,
+            ret_np=np.asarray(frontend_ret),
+            ret_from_gt_np=np.asarray(ret),
+            rtol=1e-3,
             backend=backend_fw,
             ground_truth_backend=frontend,
         )
     else:
-        with BackendHandler.update_backend(backend_fw) as ivy_backend:
-            ret = ivy_backend.to_numpy(ret)
-        assert_all_close(
-            ret_np=ret,
-            ret_from_gt_np=np.asarray(frontend_ret[0]),
-            rtol=1e-2,
-            atol=1e-2,
-            backend=backend_fw,
-            ground_truth_backend=frontend,
-        )
+        if backend_fw == "torch":
+            ret = [x.detach() for x in ret]
+        ret = [np.asarray(x) for x in ret]
+        frontend_ret = [np.asarray(x) for x in frontend_ret]
+        u, s, v = ret
+        frontend_u, frontend_s, frontend_v = frontend_ret
+        if not full_matrices:
+            helpers.assert_all_close(
+                ret_np=frontend_u @ np.diag(frontend_s) @ frontend_v.T,
+                ret_from_gt_np=u @ np.diag(s) @ v.T,
+                rtol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
+        else:
+            helpers.assert_all_close(
+                ret_np=frontend_u[..., : frontend_s.shape[0]]
+                @ np.diag(frontend_s)
+                @ frontend_v.T,
+                ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ v.T,
+                rtol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
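The rewritten assertions compare reconstructions rather than raw factors because an SVD is only unique up to per-column sign (or phase) flips in `U` and `Vh`: two correct implementations can disagree elementwise while agreeing on `U @ diag(S) @ Vh`. A condensed NumPy version of the check used above (the `reconstruct` helper is hypothetical):

```python
import numpy as np


def reconstruct(u, s, vh, full_matrices):
    if full_matrices:
        u = u[..., : s.shape[0]]  # keep only the columns scaled by s
    return u @ np.diag(s) @ vh


x = np.random.default_rng(1).standard_normal((4, 4))
x = x.T @ x + np.identity(4) * 1e-3  # symmetric positive-definite, as in the tests
u, s, vh = np.linalg.svd(x, full_matrices=True)
np.testing.assert_allclose(reconstruct(u, s, vh, True), x, atol=1e-8)
```
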
diff --git a/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py
index fb081e1cd406a..aee40b5b147fa 100644
--- a/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_jax/test_numpy/test_linalg.py
@@ -854,15 +854,10 @@ def test_jax_solve(
 @handle_frontend_test(
     fn_tree="jax.numpy.linalg.svd",
     dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
+        available_dtypes=helpers.get_dtypes("valid"),
         min_value=0,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
-    ).filter(
-        lambda x: "float16" not in x[0]
-        and "bfloat16" not in x[0]
-        and np.linalg.cond(x[1][0]) < 1 / sys.float_info.epsilon
-        and np.linalg.det(np.asarray(x[1][0])) != 0
     ),
     full_matrices=st.booleans(),
     compute_uv=st.booleans(),
@@ -881,9 +876,8 @@ def test_jax_svd(
 ):
     dtype, x = dtype_and_x
     x = np.asarray(x[0], dtype=dtype[0])
-    # make symmetric positive-definite beforehand
+    # make symmetric positive-definite
     x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
-
     ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
         frontend=frontend,
@@ -896,31 +890,41 @@ def test_jax_svd(
         full_matrices=full_matrices,
         compute_uv=compute_uv,
     )
-
     if compute_uv:
-        with BackendHandler.update_backend(backend_fw) as ivy_backend:
-            ret = [ivy_backend.to_numpy(x) for x in ret]
+        if backend_fw == "torch":
+            frontend_ret = [
+                x for x in frontend_ret
+            ]  # unpack jaxlib.xla_extension.ArrayImpl as it has no .detach()
+            ret = [x.detach() for x in ret]
+        ret = [np.asarray(x) for x in ret]
         frontend_ret = [np.asarray(x) for x in frontend_ret]
-
         u, s, vh = ret
         frontend_u, frontend_s, frontend_vh = frontend_ret
-
-        assert_all_close(
-            ret_np=u @ np.diag(s) @ vh,
-            ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
-            rtol=1e-2,
-            atol=1e-2,
-            backend=backend_fw,
-            ground_truth_backend=frontend,
-        )
+        if not full_matrices:
+            helpers.assert_all_close(
+                ret_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
+                ret_from_gt_np=u @ np.diag(s) @ vh,
+                atol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
+        else:
+            helpers.assert_all_close(
+                ret_np=frontend_u[..., : frontend_s.shape[0]]
+                @ np.diag(frontend_s)
+                @ frontend_vh,
+                ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ vh,
+                atol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
     else:
-        with BackendHandler.update_backend(backend_fw) as ivy_backend:
-            ret = ivy_backend.to_numpy(ret)
+        if backend_fw == "torch":
+            ret = ret.detach()
         assert_all_close(
-            ret_np=ret,
-            ret_from_gt_np=np.asarray(frontend_ret[0]),
-            rtol=1e-2,
-            atol=1e-2,
+            ret_np=np.asarray(frontend_ret),
+            ret_from_gt_np=np.asarray(ret),
+            atol=1e-3,
             backend=backend_fw,
             ground_truth_backend=frontend,
         )
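The `backend_fw == "torch"` branches exist because torch tensors that participate in autograd refuse direct NumPy conversion, whereas the jax ground-truth values are `ArrayImpl` objects with no `.detach()` at all. A minimal reproduction of the failure mode the `detach()` calls avoid (assuming torch is installed):

```python
import torch

t = torch.eye(3, requires_grad=True)
u, s, vh = torch.linalg.svd(t)
# s.numpy() raises: "Can't call numpy() on Tensor that requires grad. ..."
s_np = s.detach().numpy()  # the pattern used by the torch branches above
```
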
diff --git a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py
index f740b7278dacb..a8d57e1100b1b 100644
--- a/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py
+++ b/ivy_tests/test_ivy/test_frontends/test_numpy/test_linalg/test_decompositions.py
@@ -5,7 +5,7 @@

 # local
 import ivy_tests.test_ivy.helpers as helpers
-from ivy_tests.test_ivy.helpers import handle_frontend_test, BackendHandler
+from ivy_tests.test_ivy.helpers import handle_frontend_test
 from ivy_tests.test_ivy.test_functional.test_core.test_linalg import (
     _get_dtype_and_matrix,
 )
@@ -81,10 +81,11 @@ def test_numpy_qr(


 # svd
+# Todo: hermitian handling
 @handle_frontend_test(
     fn_tree="numpy.linalg.svd",
     dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
+        available_dtypes=helpers.get_dtypes("valid"),
         min_value=0.1,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
@@ -104,11 +105,10 @@ def test_numpy_svd(
     on_device,
 ):
     dtype, x = dtype_and_x
-    x = x[0]
-    x = (
-        np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
-    )  # make symmetric positive-definite
-    ret, ret_gt = helpers.test_frontend_function(
+    x = np.asarray(x[0], dtype=dtype[0])
+    # make symmetric positive-definite
+    x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
+    ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
         backend_to_test=backend_fw,
         frontend=frontend,
@@ -120,15 +120,34 @@ def test_numpy_svd(
         full_matrices=full_matrices,
         compute_uv=compute_uv,
     )
-    with BackendHandler.update_backend(backend_fw) as ivy_backend:
-        for u, v in zip(ret, ret_gt):
-            u = ivy_backend.to_numpy(ivy_backend.abs(u))
-            v = ivy_backend.to_numpy(ivy_backend.abs(v))
-            helpers.value_test(
-                ret_np_flat=u,
-                ret_np_from_gt_flat=v,
-                rtol=1e-04,
-                atol=1e-04,
+    if compute_uv:
+        ret = [np.asarray(x) for x in ret]
+        frontend_ret = [np.asarray(x) for x in frontend_ret]
+        u, s, vh = ret
+        frontend_u, frontend_s, frontend_vh = frontend_ret
+        if not full_matrices:
+            helpers.assert_all_close(
+                ret_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
+                ret_from_gt_np=u @ np.diag(s) @ vh,
+                atol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
+        else:
+            helpers.assert_all_close(
+                ret_np=frontend_u[..., : frontend_s.shape[0]]
+                @ np.diag(frontend_s)
+                @ frontend_vh,
+                ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ vh,
+                atol=1e-3,
                 backend=backend_fw,
                 ground_truth_backend=frontend,
             )
+    else:
+        helpers.assert_all_close(
+            ret_np=frontend_ret,
+            ret_from_gt_np=ret,
+            atol=1e-3,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py
index 24e4d61344997..1f33ca53cc80f 100644
--- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_linalg.py
@@ -1070,16 +1070,17 @@ def test_tensorflow_solve(
     )


+# svd
 @handle_frontend_test(
     fn_tree="tensorflow.linalg.svd",
     dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
+        available_dtypes=helpers.get_dtypes("valid"),
         min_value=0,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
     ),
     full_matrices=st.booleans(),
-    compute_uv=st.just(True),
+    compute_uv=st.booleans(),
 )
 def test_tensorflow_svd(
     *,
@@ -1094,7 +1095,7 @@ def test_tensorflow_svd(
 ):
     dtype, x = dtype_and_x
     x = np.asarray(x[0], dtype=dtype[0])
-    # make symmetric positive definite beforehand
+    # make symmetric positive definite
     x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
     ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
@@ -1104,26 +1105,35 @@ def test_tensorflow_svd(
         fn_tree=fn_tree,
         on_device=on_device,
         test_values=False,
-        atol=1e-03,
-        rtol=1e-05,
         a=x,
         full_matrices=full_matrices,
         compute_uv=compute_uv,
     )
-    ret = [ivy.to_numpy(x) for x in ret]
-    frontend_ret = [np.asarray(x) for x in frontend_ret]
-    u, s, vh = ret
-    frontend_s, frontend_u, frontend_vh = frontend_ret
+    if compute_uv:
+        ret = [np.asarray(x) for x in ret]
+        frontend_ret = [np.asarray(x) for x in frontend_ret]
-    assert_all_close(
-        ret_np=u @ np.diag(s) @ vh,
-        ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh.T,
-        rtol=1e-2,
-        atol=1e-2,
-        ground_truth_backend=frontend,
-        backend=backend_fw,
-    )
+        s, u, v = ret
+        frontend_s, frontend_u, frontend_v = frontend_ret
+
+        assert_all_close(
+            ret_np=u @ np.diag(s) @ v.T,
+            ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_v.T,
+            rtol=1e-2,
+            atol=1e-2,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
+    else:
+        assert_all_close(
+            ret_np=np.asarray(ret),
+            ret_from_gt_np=np.asarray(frontend_ret),
+            rtol=1e-2,
+            atol=1e-2,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )


 # tensor_diag
diff --git a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
index ad9b5ee4f3114..b312ac1bc7b6e 100644
--- a/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
+++ b/ivy_tests/test_ivy/test_frontends/test_tensorflow/test_raw_ops.py
@@ -4267,16 +4267,17 @@ def test_tensorflow_Sum(  # NOQA
     )


+# svd
 @handle_frontend_test(
     fn_tree="tensorflow.raw_ops.Svd",
     dtype_and_x=helpers.dtype_and_values(
         available_dtypes=helpers.get_dtypes("valid"),
-        min_value=0,
+        min_value=0.1,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
     ),
     full_matrices=st.booleans(),
-    compute_uv=st.just(True),
+    compute_uv=st.booleans(),
 )
 def test_tensorflow_Svd(
     *,
@@ -4291,8 +4292,9 @@ def test_tensorflow_Svd(
 ):
     dtype, x = dtype_and_x
     x = np.asarray(x[0], dtype=dtype[0])
-    # make symmetric positive definite beforehand
+    # make symmetric positive definite
     x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
+    x = x.astype(dtype[0])
     ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
         backend_to_test=backend_fw,
@@ -4305,20 +4307,45 @@ def test_tensorflow_Svd(
         full_matrices=full_matrices,
         compute_uv=compute_uv,
     )
-    ret = [ivy.to_numpy(x) for x in ret]
-    frontend_ret = [np.asarray(x) for x in frontend_ret]
-
-    u, s, vh = ret
-    frontend_s, frontend_u, frontend_vh = frontend_ret
-
-    assert_all_close(
-        ret_np=u @ np.diag(s) @ vh,
-        ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh.T,
-        rtol=1e-2,
-        atol=1e-2,
-        ground_truth_backend=frontend,
-        backend=backend_fw,
-    )
+    if backend_fw == "torch":
+        if not compute_uv:
+            ret = [ret[0].detach(), None, None]
+        else:
+            ret = [x.detach() for x in ret]
+    ret = [np.asarray(x) for x in ret]
+    frontend_ret = [np.asarray(x).astype(dtype[0]) for x in frontend_ret]
+    s, u, v = ret
+    frontend_s, frontend_u, frontend_v = frontend_ret
+    if compute_uv:
+        if not full_matrices:
+            helpers.assert_all_close(
+                ret_np=frontend_u @ np.diag(frontend_s) @ frontend_v.T,
+                ret_from_gt_np=u @ np.diag(s) @ v.T,
+                rtol=1e-3,
+                atol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
+        else:
+            helpers.assert_all_close(
+                ret_np=frontend_u[..., : frontend_s.shape[0]]
+                @ np.diag(frontend_s)
+                @ frontend_v.T,
+                ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ v.T,
+                rtol=1e-3,
+                atol=1e-3,
+                backend=backend_fw,
+                ground_truth_backend=frontend,
+            )
+    else:
+        assert_all_close(
+            ret_np=frontend_s,
+            ret_from_gt_np=s.astype(dtype[0]),
+            rtol=1e-3,
+            atol=1e-3,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )


 # Tan
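Per the `raw_ops.Svd` hunk earlier in this diff, the op now always returns a three-tuple: `(S, U, V)` with `V = adjoint(Vh)` when `compute_uv=True`, and `(S, None, None)` otherwise, which is why this test unpacks `s, u, v = ret` unconditionally. A sketch of consuming that contract through plain `ivy.svd` (the backend choice here is arbitrary):

```python
import ivy

ivy.set_backend("numpy")

x = ivy.asarray([[3.0, 0.0], [0.0, 2.0]])
res = ivy.svd(x, compute_uv=True, full_matrices=False)
full = (res.S, res.U, ivy.adjoint(res.Vh))                  # Svd, compute_uv=True
values_only = (ivy.svd(x, compute_uv=False).S, None, None)  # Svd, compute_uv=False
```
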
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
index f66853869c8e4..8cd18e858f16e 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_blas_and_lapack_ops.py
@@ -848,37 +848,73 @@ def test_torch_qr(
 @handle_frontend_test(
     fn_tree="torch.svd",
     dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float", index=1),
-        min_num_dims=3,
-        max_num_dims=5,
-        min_dim_size=2,
-        max_dim_size=5,
+        available_dtypes=helpers.get_dtypes("valid"),
+        min_value=0,
+        max_value=10,
+        shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
     ),
     some=st.booleans(),
-    compute=st.booleans(),
+    compute_uv=st.booleans(),
 )
 def test_torch_svd(
     dtype_and_x,
     some,
-    compute,
-    on_device,
-    fn_tree,
+    compute_uv,
     frontend,
     test_flags,
+    fn_tree,
     backend_fw,
+    on_device,
 ):
-    dtype, x = dtype_and_x
-    helpers.test_frontend_function(
-        input_dtypes=dtype,
+    input_dtype, x = dtype_and_x
+    x = np.asarray(x[0], dtype=input_dtype[0])
+    # make symmetric positive definite
+    x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
+    ret, frontend_ret = helpers.test_frontend_function(
+        input_dtypes=input_dtype,
         backend_to_test=backend_fw,
         frontend=frontend,
         test_flags=test_flags,
         fn_tree=fn_tree,
         on_device=on_device,
-        input=x[0],
+        test_values=False,
+        input=x,
         some=some,
-        compute_uv=compute,
-    )
+        compute_uv=compute_uv,
+    )
+    if backend_fw == "torch":
+        frontend_ret = [x.detach() for x in frontend_ret]
+        ret = [x.detach() for x in ret]
+    ret = [np.asarray(x) for x in ret]
+    frontend_ret = [np.asarray(x.resolve_conj()) for x in frontend_ret]
+    u, s, v = ret
+    frontend_u, frontend_s, frontend_v = frontend_ret
+    if not compute_uv:
+        helpers.assert_all_close(
+            ret_np=frontend_s,
+            ret_from_gt_np=s,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
+    elif not some:
+        helpers.assert_all_close(
+            ret_np=frontend_u @ np.diag(frontend_s) @ frontend_v.T,
+            ret_from_gt_np=u @ np.diag(s) @ v.T,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
+    else:
+        helpers.assert_all_close(
+            ret_np=frontend_u[..., : frontend_s.shape[0]]
+            @ np.diag(frontend_s)
+            @ frontend_v.T,
+            ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ v.T,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )


 @handle_frontend_test(
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py
index cda403aa53092..6710067a2678e 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_linalg.py
@@ -1242,7 +1242,12 @@ def test_torch_solve_ex(
 # svd
 @handle_frontend_test(
     fn_tree="torch.linalg.svd",
-    dtype_and_x=_get_dtype_and_matrix(square=True),
+    dtype_and_x=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("valid"),
+        min_value=0,
+        max_value=10,
+        shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
+    ),
     full_matrices=st.booleans(),
 )
 def test_torch_svd(
@@ -1257,7 +1262,7 @@ def test_torch_svd(
 ):
     dtype, x = dtype_and_x
     x = np.asarray(x[0], dtype=dtype[0])
-    # make symmetric positive definite beforehand
+    # make symmetric positive definite
    x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
     ret, frontend_ret = helpers.test_frontend_function(
         input_dtypes=dtype,
@@ -1267,25 +1272,36 @@ def test_torch_svd(
         fn_tree=fn_tree,
         on_device=on_device,
         test_values=False,
-        atol=1e-03,
-        rtol=1e-05,
         A=x,
         full_matrices=full_matrices,
     )
-    ret = [ivy.to_numpy(x) for x in ret]
+    if backend_fw == "torch":
+        frontend_ret = [x.detach() for x in frontend_ret]
+        ret = [x.detach() for x in ret]
+    ret = [np.asarray(x) for x in ret]
     frontend_ret = [np.asarray(x) for x in frontend_ret]
-
     u, s, vh = ret
     frontend_u, frontend_s, frontend_vh = frontend_ret
-
-    assert_all_close(
-        ret_np=u @ np.diag(s) @ vh,
-        ret_from_gt_np=frontend_u @ np.diag(frontend_s) @ frontend_vh,
-        rtol=1e-2,
-        atol=1e-2,
-        ground_truth_backend=frontend,
-        backend=backend_fw,
-    )
+    if full_matrices:
+        helpers.assert_all_close(
+            ret_np=(
+                frontend_u[..., : frontend_s.shape[0]]
+                @ np.diag(frontend_s)
+                @ frontend_vh
+            ),
+            ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ vh,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
+    else:
+        helpers.assert_all_close(
+            ret_np=(frontend_u @ np.diag(frontend_s) @ frontend_vh),
+            ret_from_gt_np=u @ np.diag(s) @ vh,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )


 # svdvals
diff --git a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
index 1b4c8080695b6..eaad5586b17fd 100644
--- a/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
@@ -13096,12 +13096,13 @@ def test_torch_sum(
     )


+# svd
 @handle_frontend_method(
     class_tree=CLASS_TREE,
     init_tree="torch.tensor",
     method_name="svd",
     dtype_and_x=helpers.dtype_and_values(
-        available_dtypes=helpers.get_dtypes("float"),
+        available_dtypes=helpers.get_dtypes("valid"),
         min_value=0,
         max_value=10,
         shape=helpers.ints(min_value=2, max_value=5).map(lambda x: (x, x)),
@@ -13122,7 +13123,8 @@ def test_torch_svd(
 ):
     input_dtype, x = dtype_and_x
     x = np.asarray(x[0], dtype=input_dtype[0])
-
+    # make symmetric positive-definite
+    x = np.matmul(x.T, x) + np.identity(x.shape[0]) * 1e-3
     ret, frontend_ret = helpers.test_frontend_method(
         init_input_dtypes=input_dtype,
         init_all_as_kwargs_np={
@@ -13141,28 +13143,33 @@ def test_torch_svd(
         on_device=on_device,
         test_values=False,
     )
-    with helpers.update_backend(backend_fw) as ivy_backend:
-        ret = [ivy_backend.to_numpy(x) for x in ret]
-    frontend_ret = [np.asarray(x) for x in frontend_ret]
-
-    u, s, vh = ret
-    frontend_u, frontend_s, frontend_vh = frontend_ret
-
-    if compute_uv:
+    ret = [np.asarray(x) for x in ret]
+    frontend_ret = [np.asarray(x.resolve_conj()) for x in frontend_ret]
+    u, s, v = ret
+    frontend_u, frontend_s, frontend_v = frontend_ret
+    if not compute_uv:
+        helpers.assert_all_close(
+            ret_np=frontend_s,
+            ret_from_gt_np=s,
+            atol=1e-04,
+            backend=backend_fw,
+            ground_truth_backend=frontend,
+        )
+    elif not some:
         helpers.assert_all_close(
-            ret_np=frontend_u @ np.diag(frontend_s) @ frontend_vh.T,
-            ret_from_gt_np=u @ np.diag(s) @ vh,
-            rtol=1e-2,
-            atol=1e-2,
+            ret_np=frontend_u @ np.diag(frontend_s) @ frontend_v.T,
+            ret_from_gt_np=u @ np.diag(s) @ v.T,
+            atol=1e-04,
             backend=backend_fw,
             ground_truth_backend=frontend,
         )
     else:
         helpers.assert_all_close(
-            ret_np=frontend_s,
-            ret_from_gt_np=s,
-            rtol=1e-2,
-            atol=1e-2,
+            ret_np=frontend_u[..., : frontend_s.shape[0]]
+            @ np.diag(frontend_s)
+            @ frontend_v.T,
+            ret_from_gt_np=u[..., : s.shape[0]] @ np.diag(s) @ v.T,
+            atol=1e-04,
             backend=backend_fw,
             ground_truth_backend=frontend,
         )
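Throughout the updated tests, the `full_matrices=True` branches slice `u[..., : s.shape[0]]` before reconstructing: for an `m x n` input with `k = min(m, n)`, only the first `k` columns of `U` (and first `k` rows of `Vh`) are scaled by singular values, while the remaining columns are an arbitrary orthonormal completion. A quick NumPy illustration of why the slice is needed:

```python
import numpy as np

x = np.random.default_rng(2).standard_normal((5, 3))
u_full, s, vh = np.linalg.svd(x, full_matrices=True)   # u_full: (5, 5)
u_thin, _, _ = np.linalg.svd(x, full_matrices=False)   # u_thin: (5, 3)
# the first k columns agree up to sign; the extras never enter u @ diag(s) @ vh
np.testing.assert_allclose(np.abs(u_full[:, :3]), np.abs(u_thin), atol=1e-10)
```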