Merge Paddle branch with master #13196

Merged · 46 commits · Mar 27, 2023
Commits
9c9a927
initial commit after sync with master
MahmoudAshraf97 Mar 22, 2023
af149d7
fix closest_valid_dtype
MahmoudAshraf97 Mar 22, 2023
e6bb157
added paddle to test_dtype
MahmoudAshraf97 Mar 22, 2023
dc42595
added paddle to test_general
MahmoudAshraf97 Mar 22, 2023
4a00ce1
added unsupported dtypes for cholesky, added more dtypes support for …
MahmoudAshraf97 Mar 22, 2023
f29ac38
roll()
zaeemansari70 Mar 22, 2023
e7edcf3
adapted container.__repr__ method to support paddle
MahmoudAshraf97 Mar 23, 2023
f530253
Merge branch 'PaddlePaddle' of https://github.com/unifyai/ivy into Pa…
MahmoudAshraf97 Mar 23, 2023
4e364cd
paddle/master sync 23/3/2023 (#13005)
MahmoudAshraf97 Mar 23, 2023
09c55ce
fixes for bitwise elementwise funcs to handle more cases
MahmoudAshraf97 Mar 23, 2023
7c8c430
update the arguments of inplace_update
MahmoudAshraf97 Mar 23, 2023
e206509
added missing copy args, fixed flip when axis=none, used ivy.ArrayMod…
MahmoudAshraf97 Mar 23, 2023
87f3850
Merge branch 'PaddlePaddle' of https://github.com/unifyai/ivy into Pa…
MahmoudAshraf97 Mar 23, 2023
05d8d21
fixed argmin, argmax to support more dypes, fixed typo in nonzero, mi…
MahmoudAshraf97 Mar 23, 2023
55f8034
switched to array mode in broadcast arrays,broadcast_to, minor format…
MahmoudAshraf97 Mar 23, 2023
a0c8859
updated elementwise helper to use array mode
MahmoudAshraf97 Mar 23, 2023
690f264
improve _differentiable_linspace and used ArrayMode
MahmoudAshraf97 Mar 24, 2023
6e3b45f
minor fix for unstack
MahmoudAshraf97 Mar 24, 2023
69cb70c
added constant_pad to paddle backend (#13013)
avinashyadav0027 Mar 24, 2023
7b226a7
added zero pad
MahmoudAshraf97 Mar 24, 2023
9606c27
Solve master merge conflict
MahmoudAshraf97 Mar 24, 2023
706868b
Revert "Solve master merge conflict"
MahmoudAshraf97 Mar 24, 2023
b1f812c
fixed segmentation error for repeat and unstack
MahmoudAshraf97 Mar 24, 2023
5fdc51a
fixed stack to allow gradients
MahmoudAshraf97 Mar 24, 2023
d728c64
fixed tile behaviour for 0 in repeats
MahmoudAshraf97 Mar 24, 2023
270acce
fixed gradients in clip, added more dtypes to roll
MahmoudAshraf97 Mar 24, 2023
120363c
improved cardinality tests for copy_array
MahmoudAshraf97 Mar 24, 2023
a256584
fixed matrix_rank, inner, inv, matmul, matrix_rank, inner, outer, svd…
MahmoudAshraf97 Mar 25, 2023
d1d7ae4
Adding PaddlePaddle to general functions (#12913)
vtsamit Mar 25, 2023
e82726a
Adding general function in PaddlePaddle (#12914)
vtsamit Mar 25, 2023
cd9fc94
added more dtypes and fixes to pass the tests for min, max, std, var,…
MahmoudAshraf97 Mar 26, 2023
4df24ca
Merge branch 'PaddlePaddle' of https://github.com/unifyai/ivy into Pa…
MahmoudAshraf97 Mar 26, 2023
742ff5e
added gather function to paddle backend (#12950)
czirabence Mar 26, 2023
f342956
reduce unrelated changes 1
MahmoudAshraf97 Mar 26, 2023
a2a7f63
Reset changes to readme
vedpatwardhan Mar 27, 2023
a4fa413
Reset to README.rst
vedpatwardhan Mar 27, 2023
a9d1fb7
Reset changes to multiple files
vedpatwardhan Mar 27, 2023
df397a3
Reset changes to so files
vedpatwardhan Mar 27, 2023
8ecb8a3
More resets to so files
vedpatwardhan Mar 27, 2023
7fbec0c
Temporarily deleted so files
vedpatwardhan Mar 27, 2023
aeb18c4
Added back the so files
vedpatwardhan Mar 27, 2023
9a2fb34
Reset changes to few more files
vedpatwardhan Mar 27, 2023
3d31669
Trying to reset the VVI.so
vedpatwardhan Mar 27, 2023
bb092fa
Merge master into PaddlePaddle
vedpatwardhan Mar 27, 2023
8550903
Added back VVI.so
vedpatwardhan Mar 27, 2023
fa80c09
Adding inplace_variables_supported (#13202)
vtsamit Mar 27, 2023
11 changes: 9 additions & 2 deletions ivy/data_classes/container/base.py
@@ -3673,7 +3673,14 @@ def __repr__(self, as_repr=True):
indent_str = " " * self._print_indent

def _align_array(array_str_in):
array_str_in_split = array_str_in.split("([")
split_phrase_dict = {'': "([",
'jax': "([",
'numpy': "([",
'tensorflow': "([",
'pytorch': "([",
'paddle': "])"}
split_phrase = split_phrase_dict[self._cont_ivy.current_backend_str()]
array_str_in_split = array_str_in.split(split_phrase)
leading_str_to_keep = array_str_in_split[0].replace("\\n", "")
indented_key_size = len(leading_str_to_keep.replace('"', "").split(": ")[0])
indented_key_str = " " * (indented_key_size + 2)
@@ -3699,7 +3706,7 @@ def _pre_pad_alpha_line(str_in):
num_extra_dims = i
break
extra_indent = (len(leading_str) + 1 + num_extra_dims) * " "
array_str_in = "([".join([leading_str_to_keep, remaining_str])
array_str_in = split_phrase.join([leading_str_to_keep, remaining_str])
uniform_indent_wo_overflow = array_str_in.replace(
"\\n[", "\n" + local_indent_str + extra_indent + "["
)
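
Why the split phrase now varies per backend: the alignment helper above splits each array's repr at a fixed token, and for NumPy-, JAX-, TensorFlow- and PyTorch-style reprs that token is "([" (e.g. "tensor([1, 2, 3])"), whereas a Paddle Tensor repr is laid out differently, so its entry keys on another token. A rough sketch of the lookup; the example repr strings below are illustrative assumptions, not taken from the diff:

# Illustrative repr shapes (assumed):
#   torch:  "tensor([1, 2, 3])"
#   paddle: "Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
#            [1, 2, 3])"
split_phrase_dict = {"pytorch": "([", "paddle": "])"}
backend = "paddle"  # hypothetical value of current_backend_str()
split_phrase = split_phrase_dict[backend]
print("Tensor(shape=[3], dtype=int64, [1, 2, 3])".split(split_phrase))
# -> ['Tensor(shape=[3], dtype=int64, [1, 2, 3', '']
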
156 changes: 156 additions & 0 deletions ivy/functional/backends/paddle/__init__.py
@@ -0,0 +1,156 @@
# global
import sys
import paddle as paddle

# local
import ivy

backend_version = {"version": paddle.version.full_version}

# noinspection PyUnresolvedReferences
if not ivy.is_local():
_module_in_memory = sys.modules[__name__]
else:
_module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]

use = ivy.utils.backend.ContextManager(_module_in_memory)

NativeArray = paddle.Tensor
NativeVariable = paddle.static.Variable # paddle.fluid.framework.Variable
NativeDevice = paddle.fluid.libpaddle.Place
NativeDtype = paddle.dtype
NativeShape = list

NativeSparseArray = paddle.Tensor

# devices
valid_devices = ("cpu",)

invalid_devices = ("gpu", "tpu")


# native data types
native_int8 = paddle.int8
native_int16 = paddle.int16
native_int32 = paddle.int32
native_int64 = paddle.int64
native_uint8 = paddle.uint8
native_bfloat16 = paddle.bfloat16
native_float16 = paddle.float16
native_float32 = paddle.float32
native_float64 = paddle.float64
native_complex64 = paddle.complex64
native_complex128 = paddle.complex128
native_double = native_float64
native_bool = paddle.bool

# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
valid_dtypes = (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.bfloat16,
ivy.float16,
ivy.float32,
ivy.float64,
ivy.complex64,
ivy.complex128,
ivy.bool,
)
valid_numeric_dtypes = (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
ivy.float16,
ivy.float32,
ivy.float64,
)
valid_int_dtypes = (
ivy.int8,
ivy.int16,
ivy.int32,
ivy.int64,
ivy.uint8,
)
valid_float_dtypes = (ivy.float16, ivy.float32, ivy.float64)
valid_uint_dtypes = (ivy.uint8,)
valid_complex_dtypes = (ivy.complex64, ivy.complex128)

invalid_dtypes = (
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
)
invalid_numeric_dtypes = (
ivy.uint16,
ivy.uint32,
ivy.uint64,
ivy.bfloat16,
)
invalid_int_dtypes = (ivy.uint16, ivy.uint32, ivy.uint64)
invalid_float_dtypes = (ivy.bfloat16,)
invalid_uint_dtypes = (ivy.uint16, ivy.uint32, ivy.uint64)
invalid_complex_dtypes = ()

native_inplace_support = False
supports_gradients = True


def closest_valid_dtype(type=None, /, as_native=False):
if type is None:
return ivy.default_dtype()
if isinstance(type, str) and type in invalid_dtypes:
type = {
"uint16": native_uint8,
"uint32": native_uint8,
"uint64": native_uint8,
"bfloat16": native_float16,
}[type]
return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)


backend = "paddle"

# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
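
A minimal usage sketch for the dtype fallback defined above (assuming the paddle backend is active; the expected values in the comments follow directly from the mapping in closest_valid_dtype):

import ivy

ivy.set_backend("paddle")
print(ivy.closest_valid_dtype("uint32"))    # invalid for Paddle -> falls back to uint8
print(ivy.closest_valid_dtype("bfloat16"))  # invalid for Paddle -> falls back to float16
print(ivy.closest_valid_dtype("float32"))   # already valid      -> float32
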
171 changes: 171 additions & 0 deletions ivy/functional/backends/paddle/activations.py
@@ -0,0 +1,171 @@
"""Collection of Paddle activation functions, wrapped to fit Ivy syntax and
signature.
"""
from typing import Optional, Union

# global
import paddle
import paddle.nn.functional as F

# local
import ivy
from ivy.func_wrapper import with_unsupported_device_and_dtypes
from . import backend_version

unsupported_dtypes = [
"int8",
"int16",
"int32",
"int64",
"uint8",
"float16",
"complex64",
"complex128",
"bool",
]
default_float = ivy.default_float_dtype()


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def relu(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
if paddle.is_complex(x):
return F.relu(x.real()) + 1j * F.relu(x.imag())
return F.relu(x.cast(default_float)).cast(x.dtype)
return F.relu(x)


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def leaky_relu(
x: paddle.Tensor,
/,
*,
alpha: float = 0.2,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
if paddle.is_complex(x):
return F.leaky_relu(x.real(), negative_slope=alpha) + 1j * F.leaky_relu(
x.imag(), negative_slope=alpha
)
return F.leaky_relu(x.cast(default_float), negative_slope=alpha).cast(x.dtype)
return F.leaky_relu(x, negative_slope=alpha)


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def gelu(
x: paddle.Tensor,
/,
*,
approximate: bool = False,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
if paddle.is_complex(x):
if approximate:
return (
0.5 * x * (1 + ivy.tanh(0.7978845608 * (x + 0.044715 * x * x * x)))
)
return 0.5 * x * (1 + ivy.erf(x / ivy.sqrt(2)))
return F.gelu(x.cast(default_float), approximate=approximate).cast(x.dtype)
return F.gelu(x, approximate=approximate)


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def sigmoid(
x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
if paddle.is_complex(x):
return 1 / (1 + ivy.exp(x))
return F.sigmoid(x.cast(default_float)).cast(x.dtype)
return F.sigmoid(x)


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def softmax(
x: paddle.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if axis is None:
axis = -1
exp_x = ivy.exp(ivy.array(x) - ivy.max(x, axis=axis, keepdims=True))
return ivy.divide(exp_x, ivy.sum(exp_x, axis=axis, keepdims=True))


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def softplus(
x: paddle.Tensor,
/,
*,
beta: Optional[Union[int, float]] = None,
threshold: Optional[Union[int, float]] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
if beta is not None and beta != 1:
x_beta = x * beta
res = (
ivy.add(
ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
ivy.maximum(x_beta, 0),
)
) / beta
else:
x_beta = x
res = ivy.add(
ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
ivy.maximum(x_beta, 0),
)
if threshold is not None:
return ivy.where(x_beta > threshold, x, res).astype(x.dtype)
return res.astype(x.dtype)


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def log_softmax(
x: paddle.Tensor,
/,
*,
axis: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
):
x = ivy.array(x)
x_max = ivy.max(x, axis=axis, keepdims=True)
if x_max.ndim > 0:
x_max[~ivy.isfinite(x_max)] = 0
elif not ivy.isfinite(x_max):
x_max = 0
exp_tmp = ivy.exp(x - x_max)

s = ivy.sum(exp_tmp, axis=axis, keepdims=True)
ret = ivy.log(s)
ret = x - x_max - ret
return ret
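
Both softmax and log_softmax above subtract the per-axis maximum before exponentiating, which keeps exp() from overflowing without changing the result. A quick numeric check of that log-sum-exp identity (plain NumPy, illustrative only):

import numpy as np

x = np.array([1000.0, 1001.0, 1002.0])             # naive np.exp(x) would overflow
m = x.max()
log_softmax = x - m - np.log(np.exp(x - m).sum())  # == x - log(sum(exp(x)))
print(log_softmax)                                 # -> approx. [-2.4076, -1.4076, -0.4076]
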


@with_unsupported_device_and_dtypes(
{"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def mish(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
if paddle.is_complex(x):
return x * ivy.tanh(ivy.log1p(ivy.exp(x)))
return F.mish(x.cast(default_float)).cast(x.dtype)
return F.mish(x)
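
The activations above share a common fallback: inputs whose dtype appears in unsupported_dtypes are routed through the default float dtype and cast back, while complex inputs are handled explicitly. A rough sketch of that round-trip (illustrative, not part of the diff):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-2, -1, 0, 3], dtype="int32")  # int32 is in the unsupported_dtypes list above
out = F.relu(x.cast("float32")).cast(x.dtype)        # compute in float32, cast back to int32
print(out.numpy())                                   # -> [0 0 0 3]
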
16 changes: 16 additions & 0 deletions ivy/functional/backends/paddle/control_flow_ops.py
@@ -0,0 +1,16 @@
# def if_exp(cond, if_true, if_false):
# return if_true() if cond else if_false()


def if_else(cond, body_fn, orelse_fn, vars):
if cond:
return body_fn(*vars)
else:
return orelse_fn(*vars)


def while_loop(test_fn, body_fn, vars):
result = vars
while test_fn(*result):
result = body_fn(*result)
return result
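
A small usage sketch for the control-flow helpers above (assumed call pattern: vars is an iterable of loop variables that gets unpacked into each callback):

def test_fn(i, total):
    return i > 0

def body_fn(i, total):
    return i - 1, total + i

print(while_loop(test_fn, body_fn, (5, 0)))                           # -> (0, 15)
print(if_else(True, lambda a, b: a + b, lambda a, b: a - b, (2, 3)))  # -> 5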