Skip to content

Commit

Permalink
Merge branch 'unifyai:master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
AbdelrhmanNile authored Jun 3, 2023
2 parents 99b0a59 + c99c03a commit f622e37
Show file tree
Hide file tree
Showing 14 changed files with 55 additions and 14 deletions.
2 changes: 1 addition & 1 deletion ivy/functional/backends/paddle/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -387,7 +387,7 @@ def _slice_at_axis(sl, axis):


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16", "float16")}}, backend_version
)
def linspace(
start: Union[paddle.Tensor, float],
Expand Down
6 changes: 4 additions & 2 deletions ivy/functional/backends/paddle/elementwise.py
Original file line number Diff line number Diff line change
Expand Up @@ -922,6 +922,10 @@ def exp2(
return ivy.pow(2, x)


@with_supported_dtypes(
{"2.4.2 and below": ("float64", "float32", "int64", "int64")},
backend_version,
)
def subtract(
x1: Union[float, paddle.Tensor],
x2: Union[float, paddle.Tensor],
Expand All @@ -931,8 +935,6 @@ def subtract(
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
x1, x2, ret_dtype = _elementwise_helper(x1, x2)
if x1.dtype in [paddle.int8, paddle.uint8, paddle.float16, paddle.bool]:
x1, x2 = x1.astype("float32"), x2.astype("float32")
if alpha not in (1, None):
x2 = paddle_backend.multiply(x2, alpha)
x1, x2 = ivy.promote_types_of_inputs(x1, x2)
Expand Down
1 change: 1 addition & 0 deletions ivy/functional/backends/tensorflow/experimental/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -549,6 +549,7 @@ def fft(
)
if norm != "backward" and norm != "ortho" and norm != "forward":
raise ivy.utils.exceptions.IvyError(f"Unrecognized normalization mode {norm}")
x = tf.cast(x, tf.complex128)
if x.shape[dim] != n:
s = list(x.shape)
if s[dim] > n:
Expand Down
7 changes: 7 additions & 0 deletions ivy/functional/frontends/jax/numpy/creation.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
to_ivy_arrays_and_back,
outputs_to_frontend_arrays,
handle_jax_dtype,
inputs_to_ivy_arrays
)

from ivy.func_wrapper import handle_out_argument
Expand Down Expand Up @@ -243,3 +244,9 @@ def compress(condition, a, *, axis=None, out=None):
)
arr = arr[: condition_arr.shape[0]]
return ivy.moveaxis(arr[condition_arr], 0, axis)


@inputs_to_ivy_arrays
def iterable(y):
    """Return True if *y* can be iterated over.

    Mirrors ``jax.numpy.iterable``: an object counts as iterable when it
    exposes ``__iter__`` and is not zero-dimensional (0-d arrays are not
    iterable, matching NumPy semantics).
    """
    if not hasattr(y, "__iter__"):
        return False
    # assumes y has an ``ndim`` attribute after array conversion by the
    # decorator — TODO confirm against inputs_to_ivy_arrays
    return y.ndim > 0

Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def fftshift(x, axes=None):
return roll


@with_unsupported_dtypes({"1.9.0 and below": ("float16",)}, "torch")
@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
def rfft(a, n=None, axis=-1, norm=None):
if norm is None:
Expand All @@ -84,18 +84,17 @@ def rfft(a, n=None, axis=-1, norm=None):
return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)


@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
@to_ivy_arrays_and_back
@with_unsupported_dtypes({"1.12.0 and below": ("float16",)}, "numpy")
def ihfft(a, n=None, axis=-1, norm=None):
a = ivy.array(a, dtype=ivy.float64)
if n is None:
n = a.shape[axis]
norm = _swap_direction(norm)
output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)
return output


@with_unsupported_dtypes({"2.4.2 and below": ("int",)}, "paddle")
@with_unsupported_dtypes({"1.24.3 and below": ("int",)}, "numpy")
@to_ivy_arrays_and_back
def fftfreq(n, d=1.0):
if not isinstance(
Expand Down
2 changes: 1 addition & 1 deletion ivy/functional/frontends/torch/tensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -920,7 +920,7 @@ def is_complex(self):

def addr(self, vec1, vec2, *, beta=1, alpha=1, out=None):
return torch_frontend.addr(self, vec1, vec2, beta=beta, alpha=alpha, out=out)

def addr_(self, vec1, vec2, *, beta=1, alpha=1):
self.ivy_array = self.addr(vec1, vec2, beta=beta, alpha=alpha).ivy_array
return self
Expand Down
2 changes: 1 addition & 1 deletion ivy/functional/ivy/data_type.py
Original file line number Diff line number Diff line change
Expand Up @@ -1426,7 +1426,7 @@ def default_complex_dtype(
input
Number or array for inferring the complex dtype.
complex_dtype
The float dtype to be returned.
The complex dtype to be returned.
as_native
Whether to return the complex dtype as native dtype.
Expand Down
6 changes: 3 additions & 3 deletions ivy/functional/ivy/experimental/layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -1006,7 +1006,7 @@ def embedding(
@handle_exceptions
@handle_nestable
@handle_out_argument
@to_native_arrays_and_back
@inputs_to_ivy_arrays
def dft(
x: Union[ivy.Array, ivy.NativeArray],
/,
Expand Down Expand Up @@ -1070,9 +1070,9 @@ def dft(
The signal_dim at the specified axis is equal to the dft_length.
"""
if inverse:
res = ifft(x, axis, norm=norm, n=dft_length, out=out)
res = ivy.ifft(x, axis, norm=norm, n=dft_length, out=out)
else:
res = fft(x, axis, norm=norm, n=dft_length, out=out)
res = ivy.fft(x, axis, norm=norm, n=dft_length, out=out)

if onesided:
slices = [slice(0, a) for a in res.shape]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -923,3 +923,28 @@ def test_jax_numpy_compress(
a=arr[0],
axis=ax,
)


@handle_frontend_test(
    fn_tree="jax.numpy.iterable",
    dtype_and_x=helpers.dtype_and_values(
        available_dtypes=helpers.get_dtypes("valid"),
    ),
)
def test_jax_numpy_iterable(
    dtype_and_x,
    test_flags,
    frontend,
    fn_tree,
    on_device,
):
    # Unpack the hypothesis-drawn (dtype, values) pair and forward the first
    # drawn value as the `y` argument of jax.numpy.iterable through the
    # generic frontend test harness.
    input_dtypes, values = dtype_and_x
    helpers.test_frontend_function(
        input_dtypes=input_dtypes,
        frontend=frontend,
        test_flags=test_flags,
        fn_tree=fn_tree,
        on_device=on_device,
        y=values[0],
    )

Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
# global
from hypothesis import strategies as st

# local
import ivy_tests.test_ivy.helpers as helpers
from ivy_tests.test_ivy.helpers import handle_frontend_test
from hypothesis import strategies as st


@handle_frontend_test(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,8 @@ def test_numpy_divide(
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
rtol=1e-03,
x1=xs[0],
x2=xs[1],
out=None,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ def test_numpy_cos(
test_flags=test_flags,
fn_tree=fn_tree,
on_device=on_device,
atol=1e-03,
x=x[0],
out=None,
where=where,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8823,4 +8823,4 @@ def test_torch_instance_addr_(
frontend=frontend,
atol_=1e-02,
on_device=on_device,
)
)
1 change: 1 addition & 0 deletions requirements/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,4 @@ nvidia-ml-py<=11.495.46 # mod_name=pynvml
diskcache
google-auth # mod_name=google.auth
requests
pyvis

0 comments on commit f622e37

Please sign in to comment.