diff --git a/ivy/functional/frontends/sklearn/metrics/_classification.py b/ivy/functional/frontends/sklearn/metrics/_classification.py
index e6679505631b5..4cee0d9e9187b 100644
--- a/ivy/functional/frontends/sklearn/metrics/_classification.py
+++ b/ivy/functional/frontends/sklearn/metrics/_classification.py
@@ -17,3 +17,45 @@ def accuracy_score(y_true, y_pred, *, normalize=True, sample_weight=None):
     ret = ret / y_true.shape[0]
     ret = ret.astype("float64")
     return ret
+
+
+@to_ivy_arrays_and_back
+def recall_score(y_true, y_pred, *, sample_weight=None):
+    # Ensure that y_true and y_pred have the same shape
+    if y_true.shape != y_pred.shape:
+        raise ivy.utils.exceptions.IvyValueError(
+            "y_true and y_pred must have the same shape"
+        )
+
+    # Validate sample_weight if provided; otherwise weight every sample
+    # equally. Normalizing the weights is not strictly required (recall is a
+    # ratio, so a common scale factor cancels), but it keeps the sums small.
+    if sample_weight is not None:
+        sample_weight = ivy.array(sample_weight)
+        if sample_weight.shape[0] != y_true.shape[0]:
+            raise ivy.utils.exceptions.IvyValueError(
+                "sample_weight must have the same length as y_true and y_pred"
+            )
+        sample_weight = sample_weight / ivy.sum(sample_weight)
+    else:
+        sample_weight = ivy.ones_like(y_true)
+
+    # Calculate true positives and actual positives
+    true_positives = ivy.logical_and(
+        ivy.equal(y_true, 1), ivy.equal(y_pred, 1)
+    ).astype("int64")
+    actual_positives = ivy.equal(y_true, 1).astype("int64")
+
+    # Apply sample weights
+    weighted_true_positives = ivy.multiply(true_positives, sample_weight)
+    weighted_actual_positives = ivy.multiply(actual_positives, sample_weight)
+
+    # Follow sklearn and define recall as 0.0 when y_true contains no
+    # positive labels, instead of dividing by zero and returning NaN.
+    denominator = ivy.sum(weighted_actual_positives)
+    if denominator == 0:
+        return ivy.array(0.0, dtype="float64")
+
+    ret = ivy.sum(weighted_true_positives) / denominator
+    ret = ret.astype("float64")
+    return ret
diff --git a/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py b/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
index 7977c77df3d03..1d8342188abee 100644
--- a/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
+++ b/ivy_tests/test_ivy/test_frontends/test_sklearn/test_metrics/test_classification.py
@@ -43,3 +43,74 @@ def test_sklearn_accuracy_score(
         normalize=normalize,
         sample_weight=None,
     )
+
+
+@handle_frontend_test(
+    fn_tree="sklearn.metrics.recall_score",
+    arrays_and_dtypes=helpers.dtype_and_values(
+        available_dtypes=helpers.get_dtypes("integer"),
+        num_arrays=2,
+        min_value=0,
+        max_value=1,  # restrict labels to {0, 1}: the frontend implements binary recall
+        shared_dtype=True,
+        min_num_dims=1,
+        max_num_dims=1,
+        min_dim_size=2,
+        max_dim_size=5,
+    ),
+    sample_weight=st.lists(
+        st.floats(min_value=0.1, max_value=1), min_size=2, max_size=5
+    ),
+)
+def test_sklearn_recall_score(
+    arrays_and_dtypes,
+    on_device,
+    fn_tree,
+    frontend,
+    test_flags,
+    backend_fw,
+    sample_weight,
+):
+    dtypes, values = arrays_and_dtypes
+    # Ensure the values are binary by rounding and converting to int
+    for i in range(2):
+        values[i] = np.round(values[i]).astype(int)
+
+    # Adjust sample_weight to the length of the labels: pad with ones if it
+    # is shorter, then truncate in case it is longer.
+    sample_weight = np.array(sample_weight).astype(float)
+    if len(sample_weight) != len(values[0]):
+        sample_weight = np.pad(
+            sample_weight,
+            (0, max(0, len(values[0]) - len(sample_weight))),
+            "constant",
+            constant_values=1.0,
+        )
+    sample_weight = sample_weight[: len(values[0])]
+
+    # Detach tensors that require grad before converting to NumPy arrays.
+    # torch is imported lazily so this module can still be imported when the
+    # torch backend is not installed.
+    if backend_fw == "torch":
+        import torch
+
+        values = [
+            (
+                value.detach().numpy()
+                if isinstance(value, torch.Tensor) and value.requires_grad
+                else value
+            )
+            for value in values
+        ]
+
+    helpers.test_frontend_function(
+        input_dtypes=dtypes,
+        backend_to_test=backend_fw,
+        test_flags=test_flags,
+        fn_tree=fn_tree,
+        frontend=frontend,
+        on_device=on_device,
+        y_true=values[0],
+        y_pred=values[1],
+        sample_weight=sample_weight,
+    )
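
For reviewers: a quick sanity check of the arithmetic the new frontend function implements. This is a minimal sketch, not part of the diff; the example arrays are made up, and it assumes scikit-learn is installed so the hand computation can be compared against the reference recall_score. Weighted binary recall is sum(w * TP) / sum(w * P), where P marks positions with y_true == 1.

import numpy as np
from sklearn.metrics import recall_score  # reference implementation

# Hypothetical example data, chosen only for illustration.
y_true = np.array([1, 0, 1, 1, 0])
y_pred = np.array([1, 0, 0, 1, 1])
w = np.array([0.5, 1.0, 0.2, 0.8, 1.0])

# Same arithmetic as the frontend: weighted true positives over
# weighted actual positives.
tp = ((y_true == 1) & (y_pred == 1)).astype(float)
positives = (y_true == 1).astype(float)
manual = (tp * w).sum() / (positives * w).sum()  # 1.3 / 1.5 = 0.8667

assert np.isclose(manual, recall_score(y_true, y_pred, sample_weight=w))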