diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
index f3fe3500552a7..51ba3eee9a1d7 100644
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -986,6 +986,18 @@ def trace(self, offset=0, axis1=0, axis2=1, name=None):
             ivy.trace(self._ivy_array, offset=offset, axis1=axis1, axis2=axis2)
         )
 
+    @with_supported_dtypes({"2.6.0 and below": ("float64", "float32")}, "paddle")
+    def cov(self, rowvar=True, ddof=True, fweights=None, aweights=None):
+        return paddle_frontend.Tensor(
+            ivy.cov(
+                self._ivy_array,
+                rowVar=rowvar,
+                ddof=int(ddof),
+                fweights=fweights,
+                aweights=aweights,
+            )
+        )
+
     @with_supported_dtypes(
         {
             "2.6.0 and below": (
diff --git a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
index 60bc850a30f8a..eff1506aae721 100644
--- a/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
+++ b/ivy_tests/test_ivy/test_frontends/test_paddle/test_tensor/test_tensor.py
@@ -312,6 +312,94 @@ def _get_dtype_input_and_matrices_for_matmul(draw):
     return dtype, mat1, mat2, transpose_x, transpose_y
 
 
+@st.composite
+def _get_dtype_value1_value2_cov(
+    draw,
+    available_dtypes,
+    min_num_dims,
+    max_num_dims,
+    min_dim_size,
+    max_dim_size,
+    abs_smallest_val=None,
+    min_value=None,
+    max_value=None,
+    allow_inf=False,
+    exclude_min=False,
+    exclude_max=False,
+    large_abs_safety_factor=4,
+    small_abs_safety_factor=4,
+    safety_factor_scale="log",
+):
+    shape = draw(
+        helpers.get_shape(
+            allow_none=False,
+            min_num_dims=min_num_dims,
+            max_num_dims=max_num_dims,
+            min_dim_size=min_dim_size,
+            max_dim_size=max_dim_size,
+        )
+    )
+
+    dtype = draw(st.sampled_from(draw(available_dtypes)))
+
+    values = []
+    for i in range(1):
+        values.append(
+            draw(
+                helpers.array_values(
+                    dtype=dtype,
+                    shape=shape,
+                    abs_smallest_val=abs_smallest_val,
+                    min_value=min_value,
+                    max_value=max_value,
+                    allow_inf=allow_inf,
+                    exclude_min=exclude_min,
+                    exclude_max=exclude_max,
+                    large_abs_safety_factor=large_abs_safety_factor,
+                    small_abs_safety_factor=small_abs_safety_factor,
+                    safety_factor_scale=safety_factor_scale,
+                )
+            )
+        )
+
+    value = values[0]
+
+    # modifiers: rowVar, bias, ddof
+    rowVar = draw(st.booleans())
+    ddof = draw(st.booleans())
+
+    # observations axis for the weights: 0 when rowVar is False or input is 1-D, else -1
+    if rowVar is False:
+        numVals = 0
+    else:
+        numVals = 0 if len(shape) == 1 else -1
+
+    fweights = draw(
+        helpers.array_values(
+            dtype="int64",
+            shape=shape[numVals],
+            abs_smallest_val=1,
+            min_value=1,
+            max_value=10,
+            allow_inf=False,
+        )
+    )
+
+    aweights = draw(
+        helpers.array_values(
+            dtype="float64",
+            shape=shape[numVals],
+            abs_smallest_val=1,
+            min_value=1,
+            max_value=10,
+            allow_inf=False,
+            small_abs_safety_factor=1,
+        )
+    )
+
+    return [dtype], value, rowVar, ddof, fweights, aweights
+
+
 @st.composite
 def _reshape_helper(draw):
     # generate a shape s.t len(shape) > 0
@@ -5686,6 +5774,57 @@ def test_paddle_tensor_chunk(
     )
 
 
+# cov
+@handle_frontend_method(
+    class_tree=CLASS_TREE,
+    init_tree="paddle.to_tensor",
+    method_name="cov",
+    dtype_x1_corr_cov=_get_dtype_value1_value2_cov(
+        available_dtypes=helpers.get_dtypes("float"),
+        min_num_dims=2,
+        max_num_dims=2,
+        min_dim_size=2,
+        max_dim_size=5,
+        min_value=1,
+        max_value=1e10,
+        abs_smallest_val=0.01,
+        large_abs_safety_factor=2,
+        safety_factor_scale="log",
+    ),
+)
+def test_paddle_tensor_cov(
+    dtype_x1_corr_cov,
+    frontend_method_data,
+    init_flags,
+    method_flags,
+    frontend,
+    backend_fw,
+    on_device,
+):
+    dtype, x, rowvar, ddof, fweights, aweights = dtype_x1_corr_cov
+    helpers.test_frontend_method(
+        init_input_dtypes=["float64", "int64", "float64"],
+        init_all_as_kwargs_np={
+            "data": x,
+        },
+        method_input_dtypes=["int64", "float64"],
+        backend_to_test=backend_fw,
+        method_all_as_kwargs_np={
+            "rowvar": rowvar,
+            "ddof": ddof,
+            "fweights": fweights,
+            "aweights": aweights,
+        },
+        frontend_method_data=frontend_method_data,
+        init_flags=init_flags,
+        method_flags=method_flags,
+        rtol_=1e-3,
+        atol_=1e-3,
+        frontend=frontend,
+        on_device=on_device,
+    )
+
+
 # expand
 @handle_frontend_method(
     class_tree=CLASS_TREE,