Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

added std instance method to pytorch frontend #15003

Merged
merged 9 commits
May 20, 2023
1 change: 0 additions & 1 deletion ivy/functional/backends/paddle/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,6 @@ def empty_like(
) -> paddle.Tensor:
return to_device(paddle.empty_like(x=x.cast("float32")).cast(dtype), device)


@with_unsupported_device_and_dtypes(
{
"2.4.2 and below": {
Expand Down
1 change: 1 addition & 0 deletions ivy/functional/frontends/torch/reduction_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ def median(input, dim=None, keepdim=False, *, out=None):


@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.11.0 and below": ("float16",)}, "torch")
def std(input, dim=None, unbiased=True, keepdim=False, *, out=None):
    # Frontend for torch.std: torch's boolean `unbiased` flag becomes ivy's
    # integer Bessel `correction` (True -> 1, False -> 0), and `dim`/`keepdim`
    # map onto ivy's `axis`/`keepdims` names.
    correction = int(unbiased)
    return ivy.std(
        input, axis=dim, correction=correction, keepdims=keepdim, out=out
    )

Expand Down
5 changes: 5 additions & 0 deletions ivy/functional/frontends/torch/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1136,6 +1136,11 @@ def addcdiv(self, tensor1, tensor2, *, value=1):
def sign(self):
    # Elementwise sign of this tensor, delegated to the functional frontend.
    data = self._ivy_array
    return torch_frontend.sign(data)

def std(self, dim=None, unbiased=True, keepdim=False, *, out=None):
    # Tensor-level std: defer to the functional torch frontend, forwarding
    # every argument unchanged.
    forwarded = {"dim": dim, "unbiased": unbiased, "keepdim": keepdim, "out": out}
    return torch_frontend.std(self, **forwarded)

@with_unsupported_dtypes({"1.11.0 and below": ("float16", "bfloat16")}, "torch")
def fmod(self, other, *, out=None):
    # Elementwise C-style remainder against `other`, delegated to the
    # functional frontend implementation.
    result = torch_frontend.fmod(self, other, out=out)
    return result
Expand Down
2 changes: 1 addition & 1 deletion ivy/functional/ivy/experimental/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -663,7 +663,7 @@ def _iter_product(*args, repeat=1):
for prod in result:
yield tuple(prod)


@handle_exceptions
@inputs_to_ivy_arrays
def ndenumerate(
Expand Down
36 changes: 36 additions & 0 deletions ivy_tests/test_ivy/test_frontends/test_torch/test_tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -8015,6 +8015,42 @@ def test_torch_instance_sign(
)


# std
@handle_frontend_method(
    class_tree=CLASS_TREE,
    init_tree="torch.tensor",
    method_name="std",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("float"),
        num_arrays=1,
        min_value=-1e04,
        max_value=1e04,
    ),
)
def test_torch_instance_std(
    dtype_and_x,
    frontend,
    frontend_method_data,
    init_flags,
    method_flags,
    on_device,
):
    # Exercise Tensor.std through the shared frontend-method test harness.
    # NOTE(review): values are bounded to +/-1e04 to limit numerically
    # unstable hypothesis examples; no std-specific kwargs (dim / unbiased /
    # keepdim) are passed, so only the default full reduction is covered.
    dtypes, values = dtype_and_x
    helpers.test_frontend_method(
        init_input_dtypes=dtypes,
        init_all_as_kwargs_np={"data": values[0]},
        method_input_dtypes=dtypes,
        method_all_as_kwargs_np={},
        frontend_method_data=frontend_method_data,
        init_flags=init_flags,
        method_flags=method_flags,
        frontend=frontend,
        on_device=on_device,
    )


# fmod
@handle_frontend_method(
class_tree=CLASS_TREE,
Expand Down