
Commit

Merge branch 'master' into maximum
Arwa45 authored Jul 4, 2023
2 parents 8008e82 + 4568a18 commit ed9da82
Showing 7 changed files with 606 additions and 80 deletions.
26 changes: 11 additions & 15 deletions ivy/functional/backends/jax/linear_algebra.py
@@ -422,21 +422,17 @@ def vector_norm(
) -> JaxArray:
    if dtype and x.dtype != dtype:
        x = x.astype(dtype)
-
-    ret_scalar = False
-    if x.ndim == 0:
-        x = jnp.expand_dims(x, 0)
-        ret_scalar = True
-
-    if axis is None:
-        x = x.reshape([-1])
-    elif isinstance(axis, list):
-        axis = tuple(axis)
-
-    jnp_normalized_vector = jnp.linalg.norm(x, ord, axis, keepdims)
-    if ret_scalar:
-        jnp_normalized_vector = jnp.squeeze(jnp_normalized_vector)
-    return jnp_normalized_vector
+    abs_x = jnp.abs(x)
+    if ord == 0:
+        return jnp.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims, out=out
+        )
+    elif ord == inf:
+        return jnp.max(abs_x, axis=axis, keepdims=keepdims, out=out)
+    elif ord == -inf:
+        return jnp.min(abs_x, axis=axis, keepdims=keepdims, out=out)
+    else:
+        return jnp.sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)


# Extra #
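The new JAX branch implements the standard vector-norm definitions directly: ord=0 counts nonzero entries, ord=inf and ord=-inf take the largest and smallest absolute value, and any other ord computes (sum |x_i|**ord) ** (1/ord). A standalone sanity check against jnp.linalg.norm (not part of the commit; names are illustrative):

    import jax.numpy as jnp

    x = jnp.array([3.0, -4.0, 0.0, 1.0])
    for ord_ in (0, jnp.inf, -jnp.inf, 1.0, 2.0, 3.0):
        abs_x = jnp.abs(x)
        if ord_ == 0:
            # count of nonzero entries
            ours = jnp.sum((abs_x != 0).astype(abs_x.dtype))
        elif ord_ == jnp.inf:
            ours = jnp.max(abs_x)
        elif ord_ == -jnp.inf:
            ours = jnp.min(abs_x)
        else:
            ours = jnp.sum(abs_x**ord_) ** (1.0 / ord_)
        assert jnp.allclose(ours, jnp.linalg.norm(x, ord_))

One caveat worth noting: JAX's NumPy-compatible reductions accept an out= argument only for API compatibility and require it to be None, so the out=out arguments in the new branches rely on the caller passing None.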
28 changes: 14 additions & 14 deletions ivy/functional/backends/numpy/linear_algebra.py
@@ -378,21 +378,21 @@ def vector_norm(
) -> np.ndarray:
    if dtype and x.dtype != dtype:
        x = x.astype(dtype)
-
-    ret_scalar = False
-    if x.ndim == 0:
-        x = np.expand_dims(x, 0)
-        ret_scalar = True
-
-    if axis is None:
-        x = x.reshape([-1])
-    elif isinstance(axis, list):
+    abs_x = np.abs(x)
+    if isinstance(axis, list):
        axis = tuple(axis)
-
-    np_normalized_vector = np.linalg.norm(x, ord, axis, keepdims)
-    if ret_scalar:
-        np_normalized_vector = np.squeeze(np_normalized_vector)
-    return np_normalized_vector
+    if ord == 0:
+        return np.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims, out=out
+        )
+    elif ord == inf:
+        return np.max(abs_x, axis=axis, keepdims=keepdims, out=out)
+    elif ord == -inf:
+        return np.min(abs_x, axis=axis, keepdims=keepdims, out=out)
+    else:
+        return (
+            np.sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
+        ).astype(abs_x.dtype)


# Extra #
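The NumPy version additionally casts the general branch back to the input dtype with .astype(abs_x.dtype), and its ord == 0 branch counts nonzero entries, matching NumPy's own convention for the zero-"norm". A minimal standalone illustration (not part of the commit):

    import numpy as np

    x = np.array([0.0, 1.5, 0.0, -2.0])
    abs_x = np.abs(x)
    print(np.sum((abs_x != 0).astype(abs_x.dtype)))  # 2.0 -- two nonzero entries
    print(np.linalg.norm(x, 0))                      # 2.0 -- NumPy's ord=0 agrees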
49 changes: 19 additions & 30 deletions ivy/functional/backends/paddle/linear_algebra.py
@@ -591,37 +591,26 @@ def vector_norm(
    dtype: Optional[paddle.dtype] = None,
    out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
-    ret_scalar = False
-    dtype = dtype if dtype is not None else x.dtype
-    if dtype in ["complex64", "complex128"]:
-        dtype = "float" + str(ivy.dtype_bits(dtype) // 2)
-    if x.ndim == 0:
-        x = paddle_backend.expand_dims(x, axis=0)
-        ret_scalar = True
-
-    if x.dtype in [
-        paddle.int8,
-        paddle.int16,
-        paddle.int32,
-        paddle.int64,
-        paddle.uint8,
-        paddle.float16,
-        paddle.complex64,
-        paddle.complex128,
-        paddle.bool,
-    ]:
-        if paddle.is_complex(x):
-            x = paddle.abs(x)
-            ret = paddle.norm(x, p=ord, axis=axis, keepdim=keepdims).astype(dtype)
-        else:
-            ret = paddle.norm(
-                x.cast("float32"), p=ord, axis=axis, keepdim=keepdims
-            ).astype(dtype)
+    if dtype and x.dtype != dtype:
+        x = x.astype(dtype)
+    abs_x = paddle_backend.abs(x)
+    if ord == 0:
+        return paddle_backend.sum(
+            (abs_x != 0).astype(abs_x.dtype), axis=axis, keepdims=keepdims
+        )
+    elif ord == inf:
+        return paddle_backend.max(abs_x, axis=axis, keepdims=keepdims)
+    elif ord == -inf:
+        return paddle_backend.min(abs_x, axis=axis, keepdims=keepdims)
    else:
-        ret = paddle.norm(x, p=ord, axis=axis, keepdim=keepdims).astype(dtype)
-    if ret_scalar or (x.ndim == 1 and not keepdims):
-        ret = paddle_backend.squeeze(ret, axis=axis)
-    return ret
+        return paddle_backend.pow(
+            paddle_backend.sum(
+                paddle_backend.pow(abs_x, ord),
+                axis=axis,
+                keepdims=keepdims,
+            ),
+            (1.0 / ord),
+        )


# Extra #
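The Paddle version routes everything through ivy's paddle_backend wrappers, which layer dtype handling over the raw ops. Stripped of the wrappers, the general-ord branch is just (sum |x_i|**ord) ** (1/ord); a rough plain-Paddle sketch of that quantity (p_norm is an illustrative name, and this omits the dtype handling the wrappers add):

    import paddle

    def p_norm(x, p, axis=None, keepdim=False):
        # (sum_i |x_i| ** p) ** (1 / p), as in the else-branch above
        abs_x = paddle.abs(x)
        summed = paddle.sum(paddle.pow(abs_x, p), axis=axis, keepdim=keepdim)
        return paddle.pow(summed, 1.0 / p)

    print(p_norm(paddle.to_tensor([3.0, -4.0]), 2.0))  # 5.0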
26 changes: 8 additions & 18 deletions ivy/functional/backends/tensorflow/linear_algebra.py
@@ -655,25 +655,15 @@ def vector_norm(
) -> Union[tf.Tensor, tf.Variable]:
    if dtype and x.dtype != dtype:
        x = tf.cast(x, dtype)
-    # Mathematical Norms
-    if ord > 0:
-        tn_normalized_vector = tf.linalg.norm(x, ord, axis, keepdims)
+    abs_x = tf.abs(x)
+    if ord == 0:
+        return tf.reduce_sum(tf.cast(x != 0, abs_x.dtype), axis=axis, keepdims=keepdims)
+    elif ord == inf:
+        return tf.reduce_max(abs_x, axis=axis, keepdims=keepdims)
+    elif ord == -inf:
+        return tf.reduce_min(abs_x, axis=axis, keepdims=keepdims)
    else:
-        if ord == -float("inf"):
-            tn_normalized_vector = tf.reduce_min(tf.abs(x), axis, keepdims)
-        elif ord == 0:
-            tn_normalized_vector = tf.reduce_sum(
-                tf.cast(x != 0, x.dtype), axis, keepdims
-            )
-        else:
-            tn_normalized_vector = tf.reduce_sum(tf.abs(x) ** ord, axis, keepdims) ** (
-                1.0 / ord
-            )
-        tn_normalized_vector = tf.cast(
-            tn_normalized_vector, tn_normalized_vector.dtype.real_dtype
-        )
-
-    return tn_normalized_vector
+        return tf.reduce_sum(abs_x**ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)


# Extra #
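The TensorFlow rewrite replaces the old ord > 0 / tf.linalg.norm split with the same four-way branch used in the other backends. A standalone check of all four branches (not part of the commit):

    import tensorflow as tf

    x = tf.constant([3.0, -4.0, 0.0])
    abs_x = tf.abs(x)
    print(tf.reduce_sum(tf.cast(x != 0, abs_x.dtype)))  # 2.0 (ord=0: nonzero count)
    print(tf.reduce_max(abs_x))                         # 4.0 (ord=inf)
    print(tf.reduce_min(abs_x))                         # 0.0 (ord=-inf)
    print(tf.reduce_sum(abs_x**2.0) ** 0.5)             # 5.0 (ord=2; matches tf.norm(x))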