Add yaml and unittest for instance_norm op (#43060)
* add yaml

* fix infrt compile bugs
YuanRisheng authored Jun 1, 2022
1 parent b23914c commit 56ae33b
Showing 16 changed files with 134 additions and 12 deletions.
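This commit wires instance_norm into the generated final-state eager API: a yaml entry drives code generation, the Python layers dispatch to the new op, and unit tests cover the eager path. A minimal usage sketch of the op being wired up (shapes and values are illustrative only):

import numpy as np
import paddle

# Instance norm normalizes each (sample, channel) pair over its spatial dims.
x = paddle.to_tensor(np.random.randn(2, 3, 4, 5).astype("float32"))
y = paddle.nn.functional.instance_norm(x, eps=1e-5)
print(y.shape)  # [2, 3, 4, 5]; each (n, c) slice has mean ~0, variance ~1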
@@ -31,7 +31,8 @@
"leaky_relu_double_grad", "sqrt_double_grad", "rsqrt_double_grad",
"square_double_grad", "celu_double_grad", "pad_double_grad",
"pad3d_double_grad", "squeeze_double_grad", "unsqueeze_double_grad",
"conv3d_double_grad", "depthwise_conv2d_grad_grad"
"instance_norm_double_grad", "conv3d_double_grad",
"depthwise_conv2d_grad_grad"
])

# For API dispatch used at python-level
@@ -1404,7 +1404,7 @@ def GenerateNodeDefinition(self, next_grad_node_creation_str,
const auto& out_metas = OutputMeta();
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> returns({slot_num_bwd_outputs});
for (int i = 0; i < {slot_num_bwd_outputs}; ++i) {{
-   returns[i].resize(out_metas[i].size());
+   out_metas[i].size() == 0 ? returns[i].resize(1) : returns[i].resize(out_metas[i].size());
}}
"""

2 changes: 1 addition & 1 deletion paddle/phi/infermeta/backward.cc
@@ -313,10 +313,10 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
}

void InstanceNormGradInferMeta(const MetaTensor& x,
- const MetaTensor& y_grad,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
+ const MetaTensor& y_grad,
float epsilon,
MetaTensor* x_grad,
MetaTensor* scale_grad,
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/backward.h
@@ -145,10 +145,10 @@ void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
MetaTensor* dx);

void InstanceNormGradInferMeta(const MetaTensor& x,
- const MetaTensor& y_grad,
const MetaTensor& scale,
const MetaTensor& saved_mean,
const MetaTensor& saved_variance,
+ const MetaTensor& y_grad,
float epsilon,
MetaTensor* x_grad,
MetaTensor* scale_grad,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/instance_norm_grad_kernel.cc
@@ -42,10 +42,10 @@ using EigenVectorArrayMap = Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1>>;
template <typename T, typename Context>
void InstanceNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
- const DenseTensor& d_y,
const paddle::optional<DenseTensor>& scale,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
+ const DenseTensor& d_y,
float epsilon,
DenseTensor* d_x,
DenseTensor* d_scale,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/instance_norm_grad_kernel.cu
@@ -290,10 +290,10 @@ __global__ void DoubleGradComputeDScale(const T *x,
template <typename T, typename Context>
void InstanceNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
- const DenseTensor &d_y,
const paddle::optional<DenseTensor> &scale,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
+ const DenseTensor &d_y,
float epsilon_f,
DenseTensor *d_x,
DenseTensor *d_scale,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/instance_norm_grad_kernel.h
@@ -21,10 +21,10 @@ namespace phi {
template <typename T, typename Context>
void InstanceNormGradKernel(const Context& dev_ctx,
const DenseTensor& x,
- const DenseTensor& y_grad,
const paddle::optional<DenseTensor>& scale,
const DenseTensor& saved_mean,
const DenseTensor& saved_variance,
+ const DenseTensor& y_grad,
float epsilon,
DenseTensor* x_grad,
DenseTensor* scale_grad,
2 changes: 1 addition & 1 deletion paddle/phi/ops/compat/instance_norm_sig.cc
@@ -27,7 +27,7 @@ KernelSignature InstanceNormOpArgumentMapping(
KernelSignature InstanceNormGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("instance_norm_grad",
{"X", "Y@GRAD", "Scale", "SavedMean", "SavedVariance"},
{"X", "Scale", "SavedMean", "SavedVariance", "Y@GRAD"},
{"epsilon"},
{"X@GRAD", "Scale@GRAD", "Bias@GRAD"});
}
6 changes: 5 additions & 1 deletion python/paddle/fluid/dygraph/nn.py
@@ -1137,7 +1137,11 @@ def __init__(self,
        self.bias = None

    def forward(self, input):
-        if _non_static_mode():
+        if in_dygraph_mode():
+            out, _, _, = _C_ops.final_state_instance_norm(
+                input, self.scale, self.bias, self._epsilon)
+            return out
+        if _in_legacy_dygraph():
            out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
                                             'epsilon', self._epsilon)
            return out
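The forward now tries the new eager (final-state) op first and only falls back to the legacy dygraph op. A minimal dygraph sketch exercising the patched layer (assuming the fluid InstanceNorm layer API of this release):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.randn(2, 3, 4, 5).astype("float32"))
    layer = fluid.dygraph.InstanceNorm(3)  # num_channels matches dim 1 of x
    y = layer(x)  # hits final_state_instance_norm when eager mode is on
    print(y.shape)  # [2, 3, 4, 5]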
9 changes: 9 additions & 0 deletions python/paddle/fluid/tests/unittests/test_instance_norm_op.py
@@ -22,6 +22,7 @@
from op_test import OpTest
from paddle.fluid import Program, program_guard
from paddle.fluid.dygraph import to_variable
from paddle.fluid.framework import _test_eager_guard


def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var):
@@ -253,6 +254,10 @@ def test_norm(self):
        outputs = instance_norm(to_variable(inputs))
        self.assertTrue(np.allclose(outputs.numpy(), out_np, atol=1e-6))

    def test_eager_api(self):
        with _test_eager_guard():
            self.test_norm()


class TestElasticNormOpCase2(unittest.TestCase):
    def init_test_case(self):
@@ -282,6 +287,10 @@ def test_norm(self):
        outputs = instance_norm(to_variable(inputs))
        self.assertTrue(np.allclose(outputs.numpy(), out_np, atol=1e-6))

    def test_eager_api(self):
        with _test_eager_guard():
            self.test_norm()


if __name__ == '__main__':
    unittest.main()
@@ -22,6 +22,7 @@
from paddle.fluid.framework import grad_var_name
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
import paddle


@@ -116,6 +117,11 @@ def compute_v2(x_np):
        y2 = compute_v2(x)
        self.assertTrue(np.allclose(y1, y2))

    def test_eager_api(self):
        with _test_eager_guard():
            self.test_dygraph()
            self.test_error()


if __name__ == '__main__':
    unittest.main()
66 changes: 66 additions & 0 deletions python/paddle/fluid/tests/unittests/test_norm_nn_grad.py
@@ -70,6 +70,72 @@ def func(self, place):
[x], z, x_init=x_arr, atol=atol, place=place, eps=eps)


class TestInstanceNormDoubleGradEagerCheck(unittest.TestCase):
    def instance_norm_wrapper(self, x):
        return paddle.nn.functional.instance_norm(x[0])

    @prog_scope()
    def func(self, place):
        prog = fluid.Program()
        with fluid.program_guard(prog):
            np.random.seed()
            shape = [2, 3, 4, 5]
            dtype = "float32"
            eps = 0.005
            atol = 1e-4
            x = layers.create_parameter(dtype=dtype, shape=shape, name='x')
            z = paddle.nn.functional.instance_norm(x)
            x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
            # check for static mode
            gradient_checker.double_grad_check(
                [x], z, x_init=x_arr, atol=atol, place=place, eps=eps)
            # check for eager mode
            gradient_checker.double_grad_check_for_dygraph(
                self.instance_norm_wrapper, [x],
                z,
                x_init=x_arr,
                atol=atol,
                place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestInstanceNormDoubleGradEagerCheckWithParams(
        TestInstanceNormDoubleGradEagerCheck):
    def instance_norm_wrapper(self, x):
        instance_norm = paddle.nn.InstanceNorm2D(3)
        return instance_norm(x[0])

    @prog_scope()
    def func(self, place):
        prog = fluid.Program()
        with fluid.program_guard(prog):
            np.random.seed()
            shape = [2, 3, 4, 5]
            dtype = "float32"
            eps = 0.005
            atol = 1e-4
            x = layers.create_parameter(dtype=dtype, shape=shape, name='x')
            z = paddle.nn.InstanceNorm2D(3)(x)
            x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
            # check for static mode
            gradient_checker.double_grad_check(
                [x], z, x_init=x_arr, atol=atol, place=place, eps=eps)
            # check for eager mode
            gradient_checker.double_grad_check_for_dygraph(
                self.instance_norm_wrapper, [x],
                z,
                x_init=x_arr,
                atol=atol,
                place=place)


class TestBatchNormDoubleGradCheck(unittest.TestCase):
    def setUp(self):
        self.init_test()
6 changes: 4 additions & 2 deletions python/paddle/nn/functional/norm.py
@@ -407,8 +407,10 @@ def instance_norm(x,
print(instance_norm_out)
"""

-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        out, _, _, = _C_ops.final_state_instance_norm(x, weight, bias, eps)
+        return out
+    if _in_legacy_dygraph():
        out, _, _ = _C_ops.instance_norm(x, weight, bias, "epsilon", eps,
                                         "momentum", momentum, "data_format",
                                         data_format)
11 changes: 11 additions & 0 deletions python/paddle/utils/code_gen/api.yaml
@@ -1030,6 +1030,17 @@
    data_type : x
  backward : index_select_grad

- api : instance_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon)
  output : Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
  infer_meta :
    func : InstanceNormInferMeta
  kernel :
    func : instance_norm
    data_type : x
  optional : scale, bias
  backward : instance_norm_grad

# is_empty
- api : is_empty
  args : (Tensor x)
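The instance_norm entry above is what the eager code generator consumes: args and output define the signature of the generated final_state_instance_norm, and optional : scale, bias lets callers pass None for either input. A sketch of the generated binding's shape (assumed here, not its literal code):

import paddle
from paddle import _C_ops

x = paddle.randn([2, 3, 4, 5])
# Signature mirrors the yaml: (x, scale, bias, epsilon) -> (y, saved_mean, saved_variance).
y, saved_mean, saved_variance = _C_ops.final_state_instance_norm(x, None, None, 1e-5)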
23 changes: 23 additions & 0 deletions python/paddle/utils/code_gen/backward.yaml
@@ -927,6 +927,29 @@
    data_type : x
  no_need_buffer : x

- backward_api : instance_norm_double_grad
  forward : instance_norm_grad(Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, float epsilon) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
  args : (Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float epsilon)
  output : Tensor(x_grad), Tensor(fwd_scale_grad), Tensor(grad_y_grad)
  infer_meta :
    func : InstanceNormDoubleGradInferMeta
  kernel :
    func : instance_norm_double_grad
    data_type : x
  optional : fwd_scale, grad_x_grad, grad_scale_grad, grad_bias_grad

- backward_api : instance_norm_grad
  forward : instance_norm(Tensor x, Tensor scale, Tensor bias, float epsilon) -> Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
  args : (Tensor x, Tensor scale, Tensor saved_mean, Tensor saved_variance, Tensor y_grad, float epsilon)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : InstanceNormGradInferMeta
  kernel :
    func : instance_norm_grad
    data_type : x
  optional : scale
  backward : instance_norm_double_grad

- backward_api : kldiv_loss_grad
  forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
  args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
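The instance_norm_grad args order (x, scale, saved_mean, saved_variance, y_grad) matches the reordered kernel and infermeta signatures earlier in this commit: forward inputs first, saved statistics next, upstream gradient last. For orientation, a NumPy sketch of the gradient such a kernel computes (NCHW layout; a reference formula, not the kernel's code):

import numpy as np

def instance_norm_grad_ref(x, scale, mean, var, d_y, eps=1e-5):
    # Gradient of y = scale * (x - mean) / sqrt(var + eps) + bias, where mean
    # and var are per-(n, c) statistics over the spatial dims.
    # x, d_y: (N, C, H, W); scale: (C,); mean, var: (N, C).
    std = np.sqrt(var + eps)[..., None, None]
    x_hat = (x - mean[..., None, None]) / std
    d_scale = (d_y * x_hat).sum(axis=(0, 2, 3))
    d_bias = d_y.sum(axis=(0, 2, 3))
    d_x = (scale[None, :, None, None] / std) * (
        d_y
        - d_y.mean(axis=(2, 3), keepdims=True)
        - x_hat * (d_y * x_hat).mean(axis=(2, 3), keepdims=True))
    return d_x, d_scale, d_bias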
2 changes: 1 addition & 1 deletion tools/infrt/skipped_phi_api.json
@@ -1,4 +1,4 @@
{
"phi_apis":["conj", "deformable_conv", "dropout", "expand_as", "nll_loss", "psroi_pool", "roi_align", "roi_pool", "label_smooth", "layer_norm"],
"phi_apis":["conj", "deformable_conv", "dropout", "expand_as", "nll_loss", "psroi_pool", "roi_align", "roi_pool", "label_smooth", "layer_norm", "instance_norm"],
"phi_kernels":["equal_all"]
}
