Commit 451e5fb: Merge branch 'unifyai:main' into main
Authored by muzakkirhussain011 on Mar 21, 2024
2 parents: 69b7fe5 + 9b6c898
Showing 9 changed files with 189 additions and 45 deletions.
6 changes: 3 additions & 3 deletions docs/overview/volunteer_ranks.rst
@@ -72,9 +72,6 @@ Contributors
   * - Ayush Lokare
     - `ayush111111 <https://github.com/ayush111111>`_
     - Merging Master, Ivy Inspector
-  * - Chaitanya Lakhchaura
-    - `ZenithFlux <https://github.com/ZenithFlux>`_
-    - Debugging Dynamo, Merging Master
   * - David Adlai Nettey
     - `Adlai-1 <https://github.com/Adlai-1>`_
     - Merging Master, Ivy Inspector
@@ -108,6 +105,9 @@ Contributors
   * - Aryan Pandey
     - `Aryan8912 <https://github.com/Aryan8912>`_
     - Merging Master
+  * - Chaitanya Lakhchaura
+    - `ZenithFlux <https://github.com/ZenithFlux>`_
+    - Merging Master
   * - Dhruv Sharma
     - `druvdub <https://github.com/druvdub>`_
     - Merging Master
24 changes: 24 additions & 0 deletions ivy/__init__.py
@@ -361,6 +361,17 @@ def __sizeof__(self):
    def __dir__(self):
        return self._shape.__dir__()

    def __getnewargs__(self):
        if self._shape is None:
            raise ivy.utils.exceptions.IvyException(
                "Cannot recreate a partially known Shape"
            )
        return (
            builtins.tuple(
                self._shape,
            ),
        )

    @property
    def shape(self):
        return self._shape
@@ -477,6 +488,19 @@ def as_list(self):
            )
        return list(self._shape)

    def numel(self):
        if self._shape is None:
            raise ivy.utils.exceptions.IvyException(
                "Cannot calculate the number of elements in a partially known Shape"
            )
        res = 1
        for dim in self._shape:
            res *= dim
        return res

    def __concat__(self, other):
        return self.concatenate(other)


class IntDtype(Dtype):
    def __new__(cls, dtype_str):
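For context, a minimal sketch of how the two new Shape helpers behave, assuming a build of ivy that includes this patch:

import ivy

s = ivy.Shape((2, 3, 4))
print(s.numel())  # 2 * 3 * 4 -> 24
# __getnewargs__ supplies the constructor arguments that pickle and copy
# use to rebuild the object through __new__
print(s.__getnewargs__())  # ((2, 3, 4),)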
6 changes: 1 addition & 5 deletions ivy/functional/frontends/paddle/creation.py
@@ -19,11 +19,7 @@ def arange(start, end=None, step=1, dtype=None, name=None):
)
@to_ivy_arrays_and_back
def assign(x, output=None):
-    if len(ivy.shape(x)) == 0:
-        x = ivy.reshape(ivy.Array(x), (1,))
-        if ivy.exists(output):
-            output = ivy.reshape(ivy.Array(output), (1,))
-    else:
+    if len(ivy.shape(x)) != 0:
        x = ivy.reshape(x, ivy.shape(x))
    ret = ivy.copy_array(x, to_ivy_array=False, out=output)
    return ret
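With the special case removed, assign now copies 0-d inputs unchanged instead of reshaping them to (1,). A rough usage sketch, assuming the paddle frontend re-exports assign at the package level:

import ivy
import ivy.functional.frontends.paddle as paddle_frontend

ivy.set_backend("numpy")
x = ivy.array([1.0, 2.0, 3.0])
y = paddle_frontend.assign(x)  # returns a copy of x; pass output= to write in place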
4 changes: 3 additions & 1 deletion ivy/functional/frontends/paddle/search.py
@@ -39,7 +39,9 @@ def argsort(x, /, *, axis=-1, descending=False, name=None):
)
@to_ivy_arrays_and_back
def index_sample(x, index):
-    return x[ivy.arange(x.shape[0])[:, None], index]
+    index_dtype = index.dtype
+    arange_tensor = ivy.arange(x.shape[0], dtype=index_dtype)[:, None]
+    return x[arange_tensor, index]
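index_sample gathers, for each row i, the entries x[i, index[i, j]]; the fix builds the row-index tensor with the same dtype as index so the two advanced indices agree. A small NumPy sketch of the same gather pattern, with hypothetical data:

import numpy as np

x = np.array([[10, 20, 30], [40, 50, 60]])
index = np.array([[0, 2], [1, 1]])
rows = np.arange(x.shape[0], dtype=index.dtype)[:, None]  # shape (2, 1)
print(x[rows, index])  # -> [[10, 30], [50, 50]]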


# kthvalue
29 changes: 29 additions & 0 deletions ivy/functional/frontends/sklearn/metrics/_classification.py
@@ -69,6 +69,35 @@ def f1_score(y_true, y_pred, *, sample_weight=None):
    return ret


@to_ivy_arrays_and_back
def hamming_loss(y_true, y_pred, *, sample_weight=None):
    # Ensure that y_true and y_pred have the same shape
    if y_true.shape != y_pred.shape:
        raise IvyValueError("y_true and y_pred must have the same shape")

    # Check if sample_weight is provided and normalize it
    if sample_weight is not None:
        sample_weight = ivy.array(sample_weight)
        if sample_weight.shape[0] != y_true.shape[0]:
            raise IvyValueError(
                "sample_weight must have the same length as y_true and y_pred"
            )
        sample_weight = sample_weight / ivy.sum(sample_weight)
    else:
        sample_weight = ivy.ones_like(y_true)

    # Mark the positions where prediction and truth disagree
    incorrect_predictions = ivy.not_equal(y_true, y_pred).astype("int64")
    # Apply sample weights
    weighted_incorrect_predictions = ivy.multiply(incorrect_predictions, sample_weight)

    # Hamming loss is the weighted mean of the mismatches
    loss = ivy.sum(weighted_incorrect_predictions) / ivy.sum(sample_weight)

    loss = loss.astype("float64")
    return loss
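Hamming loss is the fraction of label positions where prediction and truth disagree, optionally weighted per sample. A quick sketch of the unweighted case, assuming the frontend package re-exports hamming_loss from this module:

import ivy
from ivy.functional.frontends.sklearn.metrics import hamming_loss

ivy.set_backend("numpy")
y_true = ivy.array([0, 1, 1, 0])
y_pred = ivy.array([0, 0, 1, 1])
print(hamming_loss(y_true, y_pred))  # 2 mismatches / 4 samples -> 0.5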


@to_ivy_arrays_and_back
def precision_score(y_true, y_pred, *, sample_weight=None):
    # Ensure that y_true and y_pred have the same shape
@@ -108,6 +108,71 @@ def test_sklearn_f1_score(
    )


@handle_frontend_test(
    fn_tree="sklearn.metrics.hamming_loss",
    arrays_and_dtypes=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("integer"),
        num_arrays=2,
        min_value=0,
        max_value=1,  # Hamming loss is for binary classification
        shared_dtype=True,
        shape=(helpers.ints(min_value=2, max_value=5)),
    ),
    sample_weight=st.lists(
        st.floats(min_value=0.1, max_value=1), min_size=2, max_size=5
    ),
)
def test_sklearn_hamming_loss(
    arrays_and_dtypes,
    on_device,
    fn_tree,
    frontend,
    test_flags,
    backend_fw,
    sample_weight,
):
    dtypes, values = arrays_and_dtypes
    # Ensure the values are binary by rounding and converting to int
    for i in range(2):
        values[i] = np.round(values[i]).astype(int)

    # Adjust sample_weight to have the correct length
    sample_weight = np.array(sample_weight).astype(float)
    if len(sample_weight) != len(values[0]):
        # If sample_weight is shorter, extend it with ones
        sample_weight = np.pad(
            sample_weight,
            (0, max(0, len(values[0]) - len(sample_weight))),
            "constant",
            constant_values=1.0,
        )
        # If sample_weight is longer, truncate it
        sample_weight = sample_weight[: len(values[0])]

    # Detach tensors that require grad before converting to NumPy arrays
    if backend_fw == "torch":
        values = [
            (
                value.detach().numpy()
                if isinstance(value, torch.Tensor) and value.requires_grad
                else value
            )
            for value in values
        ]

    helpers.test_frontend_function(
        input_dtypes=dtypes,
        backend_to_test=backend_fw,
        test_flags=test_flags,
        fn_tree=fn_tree,
        frontend=frontend,
        on_device=on_device,
        y_true=values[0],
        y_pred=values[1],
        sample_weight=sample_weight,
    )
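The pad-then-truncate step above only forces the hypothesis-drawn weight list to match the sample count; a standalone sketch of that normalization, with hypothetical values:

import numpy as np

w = np.array([0.5, 0.25])
n = 4  # number of samples
w = np.pad(w, (0, max(0, n - len(w))), "constant", constant_values=1.0)
w = w[:n]
print(w)  # -> [0.5, 0.25, 1.0, 1.0]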


@handle_frontend_test(
    fn_tree="sklearn.metrics.precision_score",
    arrays_and_dtypes=helpers.dtype_and_values(
@@ -1,15 +1,11 @@
# global
import sys
import numpy as np
-from hypothesis import strategies as st, assume
+from hypothesis import strategies as st

# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
-from ivy_tests.test_ivy.test_functional.test_core.test_linalg import _matrix_rank_helper
-from ivy_tests.test_ivy.helpers.hypothesis_helpers.general_helpers import (
-    matrix_is_stable,
-)


# --- Helpers --- #
@@ -691,37 +687,6 @@ def test_torch_matmul(
)


-# matrix_rank
-@handle_frontend_test(
-    fn_tree="torch.linalg.matrix_rank",
-    # aliases=["torch.matrix_rank",], deprecated since 1.9. uncomment with multi-version
-    # testing pipeline
-    dtype_x_hermitian_atol_rtol=_matrix_rank_helper(),
-)
-def test_torch_matrix_rank(
-    dtype_x_hermitian_atol_rtol,
-    on_device,
-    fn_tree,
-    frontend,
-    test_flags,
-    backend_fw,
-):
-    dtype, x, hermitian, atol, rtol = dtype_x_hermitian_atol_rtol
-    assume(matrix_is_stable(x, cond_limit=10))
-    helpers.test_frontend_function(
-        input_dtypes=dtype,
-        backend_to_test=backend_fw,
-        frontend=frontend,
-        test_flags=test_flags,
-        fn_tree=fn_tree,
-        on_device=on_device,
-        A=x,
-        atol=atol,
-        rtol=rtol,
-        hermitian=hermitian,
-    )


# mm
@handle_frontend_test(
    fn_tree="torch.mm",
@@ -523,6 +523,9 @@ def test_torch_unfold(
    backend_fw,
):
    dtype, vals, kernel_shape, dilations, strides, padding = dtype_vals
    # TODO: add bfloat16 to the unsupported dtypes of the tested function
    if backend_fw == "paddle":
        assume("bfloat16" not in dtype[0])
    helpers.test_frontend_function(
        input_dtypes=dtype,
        backend_to_test=backend_fw,
60 changes: 60 additions & 0 deletions ivy_tests/test_ivy/test_misc/test_shape.py
@@ -182,6 +182,36 @@ def test_shape__getitem__(
    )


@handle_method(
    init_tree=CLASS_TREE,
    method_tree="Shape.__getnewargs__",
    shape=helpers.get_shape(),
)
def test_shape__getnewargs__(
    shape,
    method_name,
    class_name,
    ground_truth_backend,
    backend_fw,
    init_flags,
    method_flags,
    on_device,
):
    helpers.test_method(
        on_device=on_device,
        ground_truth_backend=ground_truth_backend,
        backend_to_test=backend_fw,
        init_flags=init_flags,
        method_flags=method_flags,
        init_all_as_kwargs_np={"shape_tup": shape},
        init_input_dtypes=DUMMY_DTYPE,
        method_input_dtypes=[],
        method_all_as_kwargs_np={},
        class_name=class_name,
        method_name=method_name,
    )


@handle_method(
    init_tree=CLASS_TREE,
    method_tree="Shape.__gt__",
@@ -442,3 +472,33 @@ def test_shape_in_conditions():
    shape = ivy.Shape(())
    condition_is_true = True if shape else False
    assert not condition_is_true


@handle_method(
    init_tree=CLASS_TREE,
    method_tree="Shape.numel",
    shape=helpers.get_shape(),
)
def test_shape_numel(
    shape,
    method_name,
    class_name,
    ground_truth_backend,
    backend_fw,
    init_flags,
    method_flags,
    on_device,
):
    helpers.test_method(
        on_device=on_device,
        ground_truth_backend=ground_truth_backend,
        backend_to_test=backend_fw,
        init_flags=init_flags,
        method_flags=method_flags,
        init_all_as_kwargs_np={"shape_tup": shape},
        init_input_dtypes=DUMMY_DTYPE,
        method_input_dtypes=[],
        method_all_as_kwargs_np={},
        class_name=class_name,
        method_name=method_name,
    )
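Together with the __getnewargs__ test above, this exercises the copy/pickle path end to end; a hand-run sanity check might look like the following sketch (not part of the suite):

import pickle
import ivy

s = ivy.Shape((2, 3))
restored = pickle.loads(pickle.dumps(s))  # rebuilt via __getnewargs__ and __new__
assert restored.numel() == s.numel() == 6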
