
[Topi][UnitTests] Parameterize conv2d and depthwise_conv2d tests (apache#8433)

* [UnitTests][Topi] Updated test_topi_conv2d_nchw.py to use parametrized tests (a toy sketch of the pattern follows the change summary below).

- Better error messages that display which workloads/targets failed and why.

- Fixed a bug in topi.nn.conv2d._get_workload exposed by the
  parametrized tests: padding was computed incorrectly when "SAME"
  padding is used with dilation > 1.

- Fixed a bug in tvm.topi.x86.group_conv2d._get_default_config: the
  dilation parameter was missing from the call to _get_conv2d_workload.

* [UnitTests][Topi] Parametrized the tests in test_topi_depthwise_conv2d.py

In preparation for parametrizing the tests to cover float16 as well.

- Single test_conv2d test with parameters for layout and input sizes.

- Extended support for NCHWc layouts so that they can be included in
  the parametrization.  (Implemented
  topi.testing.depthwise_conv2d_python_nchwc and
  topi.nn.scale_shift_nchwc, and added a layout argument to
  topi.nn.depthwise_conv2d._get_workload.)

Co-authored-by: Eric Lunderberg <elunderberg@octoml.ai>
2 people authored and ylc committed Sep 29, 2021
1 parent e097863 commit e3ee581
Showing 8 changed files with 736 additions and 756 deletions.
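
For readers unfamiliar with the test style this commit moves to: with pytest parametrization, each combination of parameters becomes its own test case, so a failure report names the exact workload that broke. A toy stand-in for the pattern (illustrative only, not the actual TVM test code):

import pytest

# Stacked parametrize decorators produce the cross-product of cases;
# each (layout, in_size) pair is reported as a separate test.
@pytest.mark.parametrize("layout", ["NCHW", "NHWC", "NCHWc"])
@pytest.mark.parametrize("in_size", [32, 56])
def test_workload_is_well_formed(layout, in_size):
    assert layout in ("NCHW", "NHWC", "NCHWc")
    assert in_size > 0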
5 changes: 4 additions & 1 deletion python/tvm/topi/nn/conv2d.py
@@ -176,10 +176,13 @@ def _get_workload(data, kernel, stride, padding, dilation, out_dtype, data_layout="NCHW"):
    else:
        KH, KW, CIG, CO = get_const_tuple(kernel.shape)

    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
    pt, pl, pb, pr = get_pad_tuple(
        padding,
        (get_const_int((KH - 1) * dilation_h + 1), get_const_int((KW - 1) * dilation_w + 1)),
    )
    GRPS = CI // CIG
    if isinstance(stride, (tuple, list)):
        HSTR, WSTR = stride
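The bug this hunk fixes: the removed call padded using the raw kernel size (KH, KW), but "SAME" padding must be computed from the dilated (effective) kernel extent, (K - 1) * dilation + 1. A standalone sketch of that arithmetic (plain Python, independent of TVM's actual get_pad_tuple):

def same_pad_hw(in_size, kernel, stride, dilation):
    """Total padding along one axis so the output size is ceil(in_size / stride)."""
    effective_kernel = (kernel - 1) * dilation + 1
    out_size = (in_size + stride - 1) // stride  # ceil division
    return max((out_size - 1) * stride + effective_kernel - in_size, 0)

# With a 3x3 kernel and dilation=2 the effective extent is 5, so the
# padding doubles relative to the undilated case:
assert same_pad_hw(32, 3, 1, 1) == 2  # pad by 1 on each side
assert same_pad_hw(32, 3, 1, 2) == 4  # pad by 2 on each side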
52 changes: 44 additions & 8 deletions python/tvm/topi/nn/depthwise_conv2d.py
@@ -24,7 +24,7 @@
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
from ..utils import simplify, get_const_tuple

# workload description of depthwise-conv2d
Workload = namedtuple(
@@ -50,11 +50,47 @@
)


def _get_workload(data, kernel, stride, padding, dilation, out_dtype, data_layout="NCHW"):
    """Get the workload structure for a depthwise conv2d.

    Input data and filter layouts are given by data_layout
    ("NCHW", "NHWC", or "NCHWc").
    """
    if data_layout == "NCHW":
        _, in_channel, height, width = get_const_tuple(data.shape)
        filter_channel, channel_multiplier, kh, kw = get_const_tuple(kernel.shape)
    elif data_layout == "NHWC":
        _, height, width, in_channel = get_const_tuple(data.shape)
        kh, kw, filter_channel, channel_multiplier = get_const_tuple(kernel.shape)
    elif data_layout == "NCHWc":
        _, in_channel_chunk, height, width, in_channel_block = get_const_tuple(data.shape)
        in_channel = in_channel_chunk * in_channel_block
        (
            filter_channel_chunk,
            cm_chunk,
            kh,
            kw,
            cm_block,
            filter_channel_block,
        ) = get_const_tuple(kernel.shape)
        filter_channel = filter_channel_chunk * filter_channel_block
        channel_multiplier = cm_chunk * cm_block

        assert (
            in_channel_block == filter_channel_block
        ), "Incorrect dimensions, data has block size {}, but filter has block size {}".format(
            in_channel_block, filter_channel_block
        )

    else:
        raise ValueError("Data layout {} not supported".format(data_layout))

    assert (
        in_channel == filter_channel
    ), "Incorrect dimensions, data has {} channels but filter expects {} channels".format(
        in_channel, filter_channel
    )

    out_channel = filter_channel * channel_multiplier
    dilation_h, dilation_w = (
        dilation if isinstance(dilation, (tuple, list)) else (dilation, dilation)
    )
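
To make the layout conventions above concrete, here is a sketch of the placeholder shapes each data_layout branch expects (shapes are illustrative; assumes the standard tvm.te import):

from tvm import te

# NCHW: data [N, C, H, W], kernel [C, M, KH, KW]
data_nchw = te.placeholder((1, 32, 56, 56), name="data")
kernel_nchw = te.placeholder((32, 1, 3, 3), name="kernel")

# NHWC: data [N, H, W, C], kernel [KH, KW, C, M]
data_nhwc = te.placeholder((1, 56, 56, 32), name="data")
kernel_nhwc = te.placeholder((3, 3, 32, 1), name="kernel")

# NCHWc with channel block 8: data [N, C//8, H, W, 8],
# kernel [C//8, M_chunk, KH, KW, M_block, 8].  Here in_channel = 4 * 8 = 32
# must match filter_channel = 4 * 8, as the asserts above check.
data_nchwc = te.placeholder((1, 4, 56, 56, 8), name="data")
kernel_nchwc = te.placeholder((4, 1, 3, 3, 1, 8), name="kernel")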
@@ -102,8 +138,8 @@ def depthwise_conv2d_nchw(Input, Filter, stride, padding, dilation, out_dtype=None):
    Filter : tvm.te.Tensor
        4-D with shape [in_channel, channel_multiplier, filter_height, filter_width]

    stride : int or a list/tuple of two ints
        The spatial stride, or (stride_height, stride_width).

    padding : int or str
        Padding size, or ['VALID', 'SAME']
31 changes: 29 additions & 2 deletions python/tvm/topi/nn/mapping.py
@@ -29,7 +29,7 @@ def scale_shift_nchw(Input, Scale, Shift):
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D input tensor, NCHW layout [batch, channel, height, width]

    Scale : tvm.te.Tensor
        Scale tensor, 1-D of size channel number
@@ -54,7 +54,7 @@ def scale_shift_nhwc(Input, Scale, Shift):
    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D input tensor, NHWC layout [batch, height, width, channel]

    Scale : tvm.te.Tensor
        Scale tensor, 1-D of size channel number
@@ -70,3 +70,30 @@ def scale_shift_nhwc(Input, Scale, Shift):
    return te.compute(
        Input.shape, lambda b, i, j, c: Input[b, i, j, c] * Scale[c] + Shift[c], name="ScaleShift"
    )


@tvm.te.tag_scope(tag=tag.BROADCAST)
def scale_shift_nchwc(Input, Scale, Shift):
    """Batch normalization operator in inference.

    Parameters
    ----------
    Input : tvm.te.Tensor
        5-D input tensor, NCHWc layout [batch, channel_chunk, height, width, channel_block]

    Scale : tvm.te.Tensor
        Scale tensor, 2-D of size [channel_chunk, channel_block]

    Shift : tvm.te.Tensor
        Shift tensor, 2-D of size [channel_chunk, channel_block]

    Returns
    -------
    Output : tvm.te.Tensor
        Output tensor, layout is NCHWc
    """
    return te.compute(
        Input.shape,
        lambda b, cc, i, j, cb: Input[b, cc, i, j, cb] * Scale[cc, cb] + Shift[cc, cb],
        name="ScaleShift",
    )
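
A minimal usage sketch, assuming scale_shift_nchwc is re-exported from tvm.topi.nn like its NCHW/NHWC siblings (shapes illustrative). A channel c of the unblocked layout maps to Scale[c // block_size, c % block_size]:

import tvm
from tvm import te
from tvm.topi.nn import scale_shift_nchwc  # assumed re-export

# NCHWc input: 4 channel chunks of block size 8 (32 channels total)
x = te.placeholder((1, 4, 16, 16, 8), name="x")
scale = te.placeholder((4, 8), name="scale")
shift = te.placeholder((4, 8), name="shift")

y = scale_shift_nchwc(x, scale, shift)
s = te.create_schedule(y.op)
f = tvm.build(s, [x, scale, shift, y], target="llvm")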
6 changes: 5 additions & 1 deletion python/tvm/topi/testing/__init__.py
@@ -32,7 +32,11 @@
from .conv1d_transpose_ncw_python import conv1d_transpose_ncw_python
from .correlation_nchw_python import correlation_nchw_python
from .deformable_conv2d_python import deformable_conv2d_nchw_python, deformable_conv2d_nhwc_python
from .depthwise_conv2d_python import (
    depthwise_conv2d_python_nchw,
    depthwise_conv2d_python_nhwc,
    depthwise_conv2d_python_nchwc,
)
from .dilate_python import dilate_python
from .softmax_python import softmax_python, log_softmax_python
from .resize_python import resize1d_python, resize2d_python, resize3d_python
57 changes: 57 additions & 0 deletions python/tvm/topi/testing/depthwise_conv2d_python.py
@@ -89,6 +89,63 @@ def depthwise_conv2d_python_nchw(input_np, filter_np, stride, padding):
    return output_np


def depthwise_conv2d_python_nchwc(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NCHWc layout.

    Parameters
    ----------
    input_np : numpy.ndarray
        5-D with shape [batch, in_channel_chunk, in_height, in_width, in_channel_block]

    filter_np : numpy.ndarray
        6-D with shape [out_channel_chunk, channel_multiplier_chunk,
        filter_height, filter_width,
        channel_multiplier_block, out_channel_block]

    stride : list / tuple of 2 ints
        [stride_height, stride_width]

    padding : str
        'VALID' or 'SAME'

    Returns
    -------
    output_np : np.ndarray
        5-D with shape [batch, out_channel_chunk, out_height, out_width, out_channel_block]
    """
    # Transform to NCHW
    batch_size, in_channel_chunk, in_height, in_width, in_channel_block = input_np.shape
    input_nchw = input_np.transpose(0, 1, 4, 2, 3).reshape(
        (batch_size, in_channel_chunk * in_channel_block, in_height, in_width)
    )

    (
        out_channel_chunk,
        channel_multiplier_chunk,
        filter_height,
        filter_width,
        channel_multiplier_block,
        out_channel_block,
    ) = filter_np.shape
    filter_nchw = filter_np.transpose(0, 5, 1, 4, 2, 3).reshape(
        (
            out_channel_chunk * out_channel_block,
            channel_multiplier_chunk * channel_multiplier_block,
            filter_height,
            filter_width,
        )
    )

    # Perform conv2d in NCHW
    output_np = depthwise_conv2d_python_nchw(input_nchw, filter_nchw, stride, padding)

    # Transform back to NCHWc
    batch_size, out_channel, out_height, out_width = output_np.shape
    return output_np.reshape(
        (batch_size, out_channel_chunk, out_channel_block, out_height, out_width)
    ).transpose(0, 1, 3, 4, 2)
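
A quick usage sketch of the new reference (shapes illustrative): 32 input channels stored as 4 chunks of block size 8, channel multiplier 1, 3x3 filter.

import numpy as np
from tvm.topi.testing import depthwise_conv2d_python_nchwc

# data: [N, C//8, H, W, 8]; filter: [C//8, M_chunk, KH, KW, M_block, 8]
input_np = np.random.uniform(size=(1, 4, 16, 16, 8)).astype("float32")
filter_np = np.random.uniform(size=(4, 1, 3, 3, 1, 8)).astype("float32")

out = depthwise_conv2d_python_nchwc(input_np, filter_np, stride=(1, 1), padding="SAME")
print(out.shape)  # expected: (1, 4, 16, 16, 8)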


def depthwise_conv2d_python_nhwc(input_np, filter_np, stride, padding):
    """Depthwise convolution operator in NHWC layout.
7 changes: 5 additions & 2 deletions python/tvm/topi/x86/group_conv2d.py
@@ -43,7 +43,9 @@ def schedule_group_conv2d_nchw(outs):
    return schedule_group_conv2d_nchwc(outs)


def _get_default_config(
    cfg, data, kernel, strides, padding, dilation, groups, out_dtype, layout="NCHW"
):
    """
    Get default schedule config for the workload
    """
@@ -55,7 +57,7 @@ def _get_default_config(cfg, data, kernel, strides, padding, groups, out_dtype,
            static_data_shape.append(dim)
    data = te.placeholder(static_data_shape, dtype=data.dtype)

    wkl = _get_conv2d_workload(data, kernel, strides, padding, dilation, out_dtype, layout)
    _fallback_schedule(cfg, wkl)


@@ -159,6 +161,7 @@ def group_conv2d_nchw_spatial_pack(
            ),
            strides,
            padding,
            dilation,
            groups,
            out_dtype,
        )
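
Why the missing dilation argument mattered: the fallback schedule is derived from a workload whose output shape depends on the dilated kernel extent, so dropping dilation (implicitly assuming 1) overstates the output size. A standalone sketch of the arithmetic (plain Python, not the TVM API):

def conv_out_size(in_size, kernel, pad_total, stride, dilation):
    """Output extent along one spatial axis, using the dilated kernel extent."""
    effective_kernel = (kernel - 1) * dilation + 1
    return (in_size + pad_total - effective_kernel) // stride + 1

# With pad_total=2, a 3x3 kernel, and stride 1:
print(conv_out_size(56, 3, 2, 1, 1))  # 56
print(conv_out_size(56, 3, 2, 1, 2))  # 54 -- smaller once dilation is honored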
