
Commit

Merge Paddle branch with master (#13196)
Co-authored-by: Zaeem Ansari <99063526+zaeemansari70@users.noreply.github.com>
Co-authored-by: avinashyadav0027 <79210315+avinashyadav0027@users.noreply.github.com>
Co-authored-by: Amitesh Vatsa <96468536+vtsamit@users.noreply.github.com>
Co-authored-by: czirabence <46842235+czirabence@users.noreply.github.com>
Co-authored-by: Ved Patwardhan <54766411+VedPatwardhan@users.noreply.github.com>
6 people authored Mar 27, 2023
1 parent d2190bd commit c829a54
Showing 49 changed files with 6,944 additions and 15 deletions.
11 changes: 9 additions & 2 deletions ivy/data_classes/container/base.py
@@ -3673,7 +3673,14 @@ def __repr__(self, as_repr=True):
         indent_str = " " * self._print_indent
 
         def _align_array(array_str_in):
-            array_str_in_split = array_str_in.split("([")
+            split_phrase_dict = {'': "([",
+                                 'jax': "([",
+                                 'numpy': "([",
+                                 'tensorflow': "([",
+                                 'pytorch': "([",
+                                 'paddle': "])"}
+            split_phrase = split_phrase_dict[self._cont_ivy.current_backend_str()]
+            array_str_in_split = array_str_in.split(split_phrase)
             leading_str_to_keep = array_str_in_split[0].replace("\\n", "")
             indented_key_size = len(leading_str_to_keep.replace('"', "").split(": ")[0])
             indented_key_str = " " * (indented_key_size + 2)
@@ -3699,7 +3706,7 @@ def _pre_pad_alpha_line(str_in):
                     num_extra_dims = i
                     break
             extra_indent = (len(leading_str) + 1 + num_extra_dims) * " "
-            array_str_in = "([".join([leading_str_to_keep, remaining_str])
+            array_str_in = split_phrase.join([leading_str_to_keep, remaining_str])
             uniform_indent_wo_overflow = array_str_in.replace(
                 "\\n[", "\n" + local_indent_str + extra_indent + "["
             )
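The backend-keyed split phrase is needed because Paddle tensors do not print in the ivy.array([...]) form the other backends share. A minimal standalone sketch of the lookup and split (illustrative only, not part of the commit):

# Minimal sketch of the backend-dependent split used when aligning reprs;
# the hard-coded backend name stands in for self._cont_ivy.current_backend_str().
split_phrase_dict = {"numpy": "([", "paddle": "])"}
backend = "numpy"
array_str = 'a: ivy.array([1, 2, 3])'
leading = array_str.split(split_phrase_dict[backend])[0]
print(leading)  # 'a: ivy.array' -- the prefix whose length sets the indentation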
156 changes: 156 additions & 0 deletions ivy/functional/backends/paddle/__init__.py
@@ -0,0 +1,156 @@
# global
import sys
import paddle as paddle

# local
import ivy

backend_version = {"version": paddle.version.full_version}

# noinspection PyUnresolvedReferences
if not ivy.is_local():
    _module_in_memory = sys.modules[__name__]
else:
    _module_in_memory = sys.modules[ivy.import_module_path].import_cache[__name__]

use = ivy.utils.backend.ContextManager(_module_in_memory)

NativeArray = paddle.Tensor
NativeVariable = paddle.static.Variable # paddle.fluid.framework.Variable
NativeDevice = paddle.fluid.libpaddle.Place
NativeDtype = paddle.dtype
NativeShape = list

NativeSparseArray = paddle.Tensor

# devices
valid_devices = ("cpu",)

invalid_devices = ("gpu", "tpu")


# native data types
native_int8 = paddle.int8
native_int16 = paddle.int16
native_int32 = paddle.int32
native_int64 = paddle.int64
native_uint8 = paddle.uint8
native_bfloat16 = paddle.bfloat16
native_float16 = paddle.float16
native_float32 = paddle.float32
native_float64 = paddle.float64
native_complex64 = paddle.complex64
native_complex128 = paddle.complex128
native_double = native_float64
native_bool = paddle.bool

# valid data types
# ToDo: Add complex dtypes to valid_dtypes and fix all resulting failures.
valid_dtypes = (
    ivy.int8,
    ivy.int16,
    ivy.int32,
    ivy.int64,
    ivy.uint8,
    ivy.bfloat16,
    ivy.float16,
    ivy.float32,
    ivy.float64,
    ivy.complex64,
    ivy.complex128,
    ivy.bool,
)
valid_numeric_dtypes = (
    ivy.int8,
    ivy.int16,
    ivy.int32,
    ivy.int64,
    ivy.uint8,
    ivy.float16,
    ivy.float32,
    ivy.float64,
)
valid_int_dtypes = (
    ivy.int8,
    ivy.int16,
    ivy.int32,
    ivy.int64,
    ivy.uint8,
)
valid_float_dtypes = (ivy.float16, ivy.float32, ivy.float64)
valid_uint_dtypes = (ivy.uint8,)
valid_complex_dtypes = (ivy.complex64, ivy.complex128)

invalid_dtypes = (
    ivy.uint16,
    ivy.uint32,
    ivy.uint64,
    ivy.bfloat16,
)
invalid_numeric_dtypes = (
    ivy.uint16,
    ivy.uint32,
    ivy.uint64,
    ivy.bfloat16,
)
invalid_int_dtypes = (ivy.uint16, ivy.uint32, ivy.uint64)
invalid_float_dtypes = (ivy.bfloat16,)
invalid_uint_dtypes = (ivy.uint16, ivy.uint32, ivy.uint64)
invalid_complex_dtypes = ()

native_inplace_support = False
supports_gradients = True


def closest_valid_dtype(type=None, /, as_native=False):
    if type is None:
        return ivy.default_dtype()
    if isinstance(type, str) and type in invalid_dtypes:
        type = {
            "uint16": native_uint8,
            "uint32": native_uint8,
            "uint64": native_uint8,
            "bfloat16": native_float16,
        }[type]
    return ivy.as_ivy_dtype(type) if not as_native else ivy.as_native_dtype(type)
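A usage sketch of the mapping above (illustrative, not part of the file; assumes the Paddle backend is active so ivy.as_ivy_dtype can translate the native dtypes):

# Unsigned and bfloat16 dtypes that Paddle lacks are remapped to the
# closest dtype it does support; already-valid dtypes pass through.
print(closest_valid_dtype("uint32"))    # -> uint8
print(closest_valid_dtype("bfloat16"))  # -> float16
print(closest_valid_dtype("float32"))   # -> float32 (unchanged)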


backend = "paddle"

# local sub-modules
from . import activations
from .activations import *
from . import creation
from .creation import *
from . import data_type
from .data_type import *
from . import device
from .device import *
from . import elementwise
from .elementwise import *
from . import general
from .general import *
from . import gradients
from .gradients import *
from . import layers
from .layers import *
from . import linear_algebra as linalg
from .linear_algebra import *
from . import manipulation
from .manipulation import *
from . import random
from .random import *
from . import searching
from .searching import *
from . import set
from .set import *
from . import sorting
from .sorting import *
from . import statistical
from .statistical import *
from . import utility
from .utility import *
from . import experimental
from .experimental import *
from . import control_flow_ops
from .control_flow_ops import *
171 changes: 171 additions & 0 deletions ivy/functional/backends/paddle/activations.py
@@ -0,0 +1,171 @@
"""Collection of Paddle activation functions, wrapped to fit Ivy syntax and
signature.
"""
from typing import Optional, Union

# global
import paddle
import paddle.nn.functional as F

# local
import ivy
from ivy.func_wrapper import with_unsupported_device_and_dtypes
from . import backend_version

unsupported_dtypes = [
    "int8",
    "int16",
    "int32",
    "int64",
    "uint8",
    "float16",
    "complex64",
    "complex128",
    "bool",
]
default_float = ivy.default_float_dtype()


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def relu(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
    if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
        if paddle.is_complex(x):
            return F.relu(x.real()) + 1j * F.relu(x.imag())
        return F.relu(x.cast(default_float)).cast(x.dtype)
    return F.relu(x)
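The wrapper treats the integer, bool, float16, and complex dtypes listed in unsupported_dtypes as unsupported by the native op: non-complex inputs are cast to the default float dtype, passed through, and cast back. A minimal usage sketch (illustrative, not part of the file; assumes the Paddle backend is in use):

import paddle

x = paddle.to_tensor([-2, -1, 0, 3])  # int64 tensor; "int64" is in unsupported_dtypes
y = relu(x)                           # internally: cast to float, F.relu, cast back
print(y.numpy(), y.dtype)             # [0 0 0 3], original int64 dtype preserved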


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def leaky_relu(
    x: paddle.Tensor,
    /,
    *,
    alpha: float = 0.2,
    out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
        if paddle.is_complex(x):
            return F.leaky_relu(x.real(), negative_slope=alpha) + 1j * F.leaky_relu(
                x.imag(), negative_slope=alpha
            )
        return F.leaky_relu(x.cast(default_float), negative_slope=alpha).cast(x.dtype)
    return F.leaky_relu(x, negative_slope=alpha)


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def gelu(
    x: paddle.Tensor,
    /,
    *,
    approximate: bool = False,
    out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
        if paddle.is_complex(x):
            if approximate:
                return (
                    0.5 * x * (1 + ivy.tanh(0.7978845608 * (x + 0.044715 * x * x * x)))
                )
            return 0.5 * x * (1 + ivy.erf(x / ivy.sqrt(2)))
        return F.gelu(x.cast(default_float), approximate=approximate).cast(x.dtype)
    return F.gelu(x, approximate=approximate)
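The constant 0.7978845608 in the approximate branch is sqrt(2/pi), from the tanh approximation gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))). A quick check (illustrative only, not part of the file):

import math

# The hard-coded 0.7978845608 is sqrt(2 / pi), used by the tanh
# approximation of GELU in the complex-input branch above.
print(math.sqrt(2 / math.pi))  # 0.7978845608028654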


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def sigmoid(
    x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None
) -> paddle.Tensor:
    if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
        if paddle.is_complex(x):
            return 1 / (1 + ivy.exp(-x))
        return F.sigmoid(x.cast(default_float)).cast(x.dtype)
    return F.sigmoid(x)


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def softmax(
    x: paddle.Tensor,
    /,
    *,
    axis: Optional[int] = None,
    out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    if axis is None:
        axis = -1
    exp_x = ivy.exp(ivy.array(x) - ivy.max(x, axis=axis, keepdims=True))
    return ivy.divide(exp_x, ivy.sum(exp_x, axis=axis, keepdims=True))
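Subtracting the per-axis maximum before exponentiating keeps the intermediate values finite without changing the result. A small standalone check of that identity (illustrative only, not part of the file):

import math

# softmax is shift-invariant: exp(x - m) / sum(exp(x - m)) equals the
# naive formula, but never overflows even for large inputs.
x = [1000.0, 1001.0]
m = max(x)
exps = [math.exp(v - m) for v in x]
print([e / sum(exps) for e in exps])  # ~[0.2689, 0.7311]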


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def softplus(
    x: paddle.Tensor,
    /,
    *,
    beta: Optional[Union[int, float]] = None,
    threshold: Optional[Union[int, float]] = None,
    out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
    if beta is not None and beta != 1:
        x_beta = x * beta
        res = (
            ivy.add(
                ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
                ivy.maximum(x_beta, 0),
            )
        ) / beta
    else:
        x_beta = x
        res = ivy.add(
            ivy.log1p(ivy.exp(-ivy.abs(x_beta))),
            ivy.maximum(x_beta, 0),
        )
    if threshold is not None:
        return ivy.where(x_beta > threshold, x, res).astype(x.dtype)
    return res.astype(x.dtype)
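The expression log1p(exp(-|x|)) + max(x, 0) is the overflow-safe rewrite of softplus(x) = log(1 + exp(x)), and when a threshold is given, inputs whose scaled value exceeds it are returned unchanged. A quick standalone check of the identity (illustrative only, not part of the file):

import math

# Stable softplus: log(1 + exp(v)) == log1p(exp(-|v|)) + max(v, 0),
# which stays finite even when exp(v) itself would overflow.
for v in (-30.0, 0.0, 30.0):
    stable = math.log1p(math.exp(-abs(v))) + max(v, 0.0)
    print(v, stable)  # matches log(1 + exp(v)) to machine precision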


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def log_softmax(
    x: paddle.Tensor,
    /,
    *,
    axis: Optional[int] = None,
    out: Optional[paddle.Tensor] = None,
):
    x = ivy.array(x)
    x_max = ivy.max(x, axis=axis, keepdims=True)
    if x_max.ndim > 0:
        x_max[~ivy.isfinite(x_max)] = 0
    elif not ivy.isfinite(x_max):
        x_max = 0
    exp_tmp = ivy.exp(x - x_max)

    s = ivy.sum(exp_tmp, axis=axis, keepdims=True)
    ret = ivy.log(s)
    ret = x - x_max - ret
    return ret


@with_unsupported_device_and_dtypes(
    {"2.4.2 and below": {"cpu": ("uint16", "bfloat16")}}, backend_version
)
def mish(x: paddle.Tensor, /, *, out: Optional[paddle.Tensor] = None) -> paddle.Tensor:
    if ivy.as_ivy_dtype(x.dtype) in unsupported_dtypes:
        if paddle.is_complex(x):
            return x * ivy.tanh(ivy.log1p(ivy.exp(x)))
        return F.mish(x.cast(default_float)).cast(x.dtype)
    return F.mish(x)
16 changes: 16 additions & 0 deletions ivy/functional/backends/paddle/control_flow_ops.py
@@ -0,0 +1,16 @@
# def if_exp(cond, if_true, if_false):
# return if_true() if cond else if_false()


def if_else(cond, body_fn, orelse_fn, vars):
    if cond:
        return body_fn(*vars)
    else:
        return orelse_fn(*vars)


def while_loop(test_fn, body_fn, vars):
    result = vars
    while test_fn(*result):
        result = body_fn(*result)
    return result
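These helpers are the backend's eager control-flow primitives: they simply run the Python branch or loop with the supplied state tuple. A minimal usage sketch (illustrative, not part of the file):

# Sum 0 + 1 + 2 + 3 + 4 with while_loop: the state tuple is threaded
# through body_fn until test_fn returns False.
def test_fn(i, total):
    return i < 5

def body_fn(i, total):
    return i + 1, total + i

print(while_loop(test_fn, body_fn, (0, 0)))                    # -> (5, 10)
print(if_else(True, lambda a: a + 1, lambda a: a - 1, (41,)))  # -> 42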
