Commit e68cfbd
[test] Parametrize test_ad_basics.py (#3129)
* [test] Fix vulkan for test_trigonometric

* Auto Format

* fix

* fix

Co-authored-by: Taichi Gardener <taichigardener@gmail.com>
k-ye and taichi-gardener committed Oct 9, 2021
1 parent ba1c165 commit e68cfbd
Showing 1 changed file with 105 additions and 75 deletions: tests/python/test_ad_basics.py
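In summary, the diff below converts tests that looped over grad_test(...) calls into @pytest.mark.parametrize cases, so each lambda is collected and reported as its own test; test_trigonometric additionally excludes the Vulkan backend, and grad_test loses its inner @ti.test wrapper in favor of decorating the tests directly. A minimal sketch of the decorator/parametrize interaction — test_example and the square-function pair are illustrative stand-ins, not lines from the diff:

import functools

import numpy as np
import pytest


def if_has_autograd(func):
    # functools.wraps copies func's metadata, including its signature,
    # onto the wrapper. Without it pytest would inspect the generic
    # wrapper(*args, **kwargs) and fail to match the argument names
    # declared in @pytest.mark.parametrize.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper


# Each (tifunc, npfunc) pair is collected as a separate test case.
@pytest.mark.parametrize('tifunc,npfunc', [
    (lambda x: x * x, lambda x: np.square(x)),
])
@if_has_autograd
def test_example(tifunc, npfunc):
    assert tifunc(0.5) == npfunc(0.5)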
@@ -1,3 +1,7 @@
+import functools
+
+import pytest
+
 import taichi as ti
 from taichi import approx

@@ -12,6 +16,8 @@
 
 
 def if_has_autograd(func):
+    # functools.wraps is necessary for pytest parametrization to work
+    @functools.wraps(func)
     def wrapper(*args, **kwargs):
         if has_autograd:
             func(*args, **kwargs)
@@ -20,35 +26,29 @@ def wrapper(*args, **kwargs):
 
 
 # Note: test happens at v = 0.2
-def grad_test(tifunc, npfunc=None, default_fp=ti.f32):
-    if npfunc is None:
-        npfunc = tifunc
-
-    @ti.test(require=ti.extension.data64 if default_fp == ti.f64 else [],
-             default_fp=default_fp)
-    def impl():
-        print(f'arch={ti.cfg.arch} default_fp={ti.cfg.default_fp}')
-        x = ti.field(default_fp)
-        y = ti.field(default_fp)
+def grad_test(tifunc, npfunc=None):
+    npfunc = npfunc or tifunc
 
-        ti.root.dense(ti.i, 1).place(x, x.grad, y, y.grad)
+    print(f'arch={ti.cfg.arch} default_fp={ti.cfg.default_fp}')
+    x = ti.field(ti.cfg.default_fp)
+    y = ti.field(ti.cfg.default_fp)
 
-        @ti.kernel
-        def func():
-            for i in x:
-                y[i] = tifunc(x[i])
+    ti.root.dense(ti.i, 1).place(x, x.grad, y, y.grad)
 
-        v = 0.234
+    @ti.kernel
+    def func():
+        for i in x:
+            y[i] = tifunc(x[i])
 
-        y.grad[0] = 1
-        x[0] = v
-        func()
-        func.grad()
+    v = 0.234
 
-        assert y[0] == approx(npfunc(v), rel=1e-4)
-        assert x.grad[0] == approx(grad(npfunc)(v), rel=1e-4)
+    y.grad[0] = 1
+    x[0] = v
+    func()
+    func.grad()
 
-    impl()
+    assert y[0] == approx(npfunc(v), rel=1e-4)
+    assert x.grad[0] == approx(grad(npfunc)(v), rel=1e-4)
 
 
 @if_has_autograd
@@ -62,54 +62,72 @@ def test_size1():
     assert x[0] == 1
 
 
+@pytest.mark.parametrize('tifunc', [
+    lambda x: x,
+    lambda x: -x,
+    lambda x: x * x,
+    lambda x: x**2,
+    lambda x: x * x * x,
+    lambda x: x * x * x * x,
+    lambda x: 0.4 * x * x - 3,
+    lambda x: (x - 3) * (x - 1),
+    lambda x: (x - 3) * (x - 1) + x * x,
+])
 @if_has_autograd
-def test_poly():
-    grad_test(lambda x: x)
-    grad_test(lambda x: -x)
-    grad_test(lambda x: x * x)
-    grad_test(lambda x: x**2)
-    grad_test(lambda x: x * x * x)
-    grad_test(lambda x: x * x * x * x)
-    grad_test(lambda x: 0.4 * x * x - 3)
-    grad_test(lambda x: (x - 3) * (x - 1))
-    grad_test(lambda x: (x - 3) * (x - 1) + x * x)
+@ti.test()
+def test_poly(tifunc):
+    grad_test(tifunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: ti.tanh(x), lambda x: np.tanh(x)),
+    (lambda x: ti.sin(x), lambda x: np.sin(x)),
+    (lambda x: ti.cos(x), lambda x: np.cos(x)),
+    (lambda x: ti.acos(x), lambda x: np.arccos(x)),
+    (lambda x: ti.asin(x), lambda x: np.arcsin(x)),
+])
 @if_has_autograd
-def test_trigonometric():
-    grad_test(lambda x: ti.tanh(x), lambda x: np.tanh(x))
-    grad_test(lambda x: ti.sin(x), lambda x: np.sin(x))
-    grad_test(lambda x: ti.cos(x), lambda x: np.cos(x))
-    grad_test(lambda x: ti.acos(x), lambda x: np.arccos(x))
-    grad_test(lambda x: ti.asin(x), lambda x: np.arcsin(x))
+@ti.test(exclude=[ti.vulkan])
+def test_trigonometric(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
+@pytest.mark.parametrize('tifunc', [
+    lambda x: 1 / x,
+    lambda x: (x + 1) / (x - 1),
+    lambda x: (x + 1) * (x + 2) / ((x - 1) * (x + 3)),
+])
 @if_has_autograd
-def test_frac():
-    grad_test(lambda x: 1 / x)
-    grad_test(lambda x: (x + 1) / (x - 1))
-    grad_test(lambda x: (x + 1) * (x + 2) / ((x - 1) * (x + 3)))
+@ti.test()
+def test_frac(tifunc):
+    grad_test(tifunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: ti.sqrt(x), lambda x: np.sqrt(x)),
+    (lambda x: ti.exp(x), lambda x: np.exp(x)),
+    (lambda x: ti.log(x), lambda x: np.log(x)),
+])
 @if_has_autograd
-def test_unary():
-    grad_test(lambda x: ti.sqrt(x), lambda x: np.sqrt(x))
-    grad_test(lambda x: ti.exp(x), lambda x: np.exp(x))
-    grad_test(lambda x: ti.log(x), lambda x: np.log(x))
+@ti.test()
+def test_unary(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: ti.min(x, 0), lambda x: np.minimum(x, 0)),
+    (lambda x: ti.min(x, 1), lambda x: np.minimum(x, 1)),
+    (lambda x: ti.min(0, x), lambda x: np.minimum(0, x)),
+    (lambda x: ti.min(1, x), lambda x: np.minimum(1, x)),
+    (lambda x: ti.max(x, 0), lambda x: np.maximum(x, 0)),
+    (lambda x: ti.max(x, 1), lambda x: np.maximum(x, 1)),
+    (lambda x: ti.max(0, x), lambda x: np.maximum(0, x)),
+    (lambda x: ti.max(1, x), lambda x: np.maximum(1, x)),
+])
 @if_has_autograd
-def test_minmax():
-    grad_test(lambda x: ti.min(x, 0), lambda x: np.minimum(x, 0))
-    grad_test(lambda x: ti.min(x, 1), lambda x: np.minimum(x, 1))
-    grad_test(lambda x: ti.min(0, x), lambda x: np.minimum(0, x))
-    grad_test(lambda x: ti.min(1, x), lambda x: np.minimum(1, x))
-
-    grad_test(lambda x: ti.max(x, 0), lambda x: np.maximum(x, 0))
-    grad_test(lambda x: ti.max(x, 1), lambda x: np.maximum(x, 1))
-    grad_test(lambda x: ti.max(0, x), lambda x: np.maximum(0, x))
-    grad_test(lambda x: ti.max(1, x), lambda x: np.maximum(1, x))
+@ti.test()
+def test_minmax(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
 @if_has_autograd
@@ -136,32 +154,44 @@ def func2():
     func2.grad()
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x)),
+    (lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4)),
+])
 @if_has_autograd
-def test_atan2():
-    grad_test(lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x))
-    grad_test(lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4))
+@ti.test()
+def test_atan2(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: ti.atan2(0.4, x), lambda x: np.arctan2(0.4, x)),
+    (lambda y: ti.atan2(y, 0.4), lambda y: np.arctan2(y, 0.4)),
+])
 @if_has_autograd
-def test_atan2_f64():
-    grad_test(lambda x: ti.atan2(0.4, x),
-              lambda x: np.arctan2(0.4, x),
-              default_fp=ti.f64)
-    grad_test(lambda y: ti.atan2(y, 0.4),
-              lambda y: np.arctan2(y, 0.4),
-              default_fp=ti.f64)
+@ti.test(require=ti.extension.data64, default_fp=ti.f64)
+def test_atan2_f64(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: 0.4**x, lambda x: np.power(0.4, x)),
+    (lambda y: y**0.4, lambda y: np.power(y, 0.4)),
+])
 @if_has_autograd
-def test_pow():
-    grad_test(lambda x: 0.4**x, lambda x: np.power(0.4, x))
-    grad_test(lambda y: y**0.4, lambda y: np.power(y, 0.4))
+@ti.test()
+def test_pow(tifunc, npfunc):
+    grad_test(tifunc, npfunc)
 
 
+@pytest.mark.parametrize('tifunc,npfunc', [
+    (lambda x: 0.4**x, lambda x: np.power(0.4, x)),
+    (lambda y: y**0.4, lambda y: np.power(y, 0.4)),
+])
 @if_has_autograd
-def test_pow_f64():
-    grad_test(lambda x: 0.4**x, lambda x: np.power(0.4, x), default_fp=ti.f64)
-    grad_test(lambda y: y**0.4, lambda y: np.power(y, 0.4), default_fp=ti.f64)
+@ti.test(require=ti.extension.data64, default_fp=ti.f64)
+def test_pow_f64(tifunc, npfunc):
+    grad_test(tifunc, npfunc)


 @ti.test()
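A practical upshot of the parametrization: each lambda now gets its own test id, so a single failing case can be rerun in isolation instead of re-running a whole hand-written loop, e.g. with an invocation along these lines:

pytest tests/python/test_ad_basics.py -k test_trigonometric -v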
