Skip to content

Commit

Permalink
add conv2d relay test
Browse files Browse the repository at this point in the history
  • Loading branch information
masahi committed Apr 14, 2022
1 parent 7291e47 commit dd956ec
Showing 1 changed file with 60 additions and 30 deletions.
90 changes: 60 additions & 30 deletions tests/python/relay/test_op_level2.py
Original file line number Diff line number Diff line change
Expand Up @@ -1222,13 +1222,7 @@ def _test_pool3d(
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
pool_type = "max" if "max" in str(opfunc) else "avg"
y = opfunc(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=dilation,
)
y = opfunc(x, pool_size=pool_size, strides=strides, padding=padding, dilation=dilation,)
func = relay.Function([x], y)
data = np.random.uniform(size=dshape).astype(dtype)
ref_res = tvm.topi.testing.poolnd_python(
Expand Down Expand Up @@ -1582,18 +1576,14 @@ def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixe

def get_shape():
if layout == "NCDHW":
return (c, d, h, w), (
c,
int(round(d * scale_d)),
int(round(h * scale_h)),
int(round(w * scale_w)),
return (
(c, d, h, w),
(c, int(round(d * scale_d)), int(round(h * scale_h)), int(round(w * scale_w)),),
)
else:
return (d, h, w, c), (
int(round(d * scale_d)),
int(round(h * scale_h)),
int(round(w * scale_w)),
c,
return (
(d, h, w, c),
(int(round(d * scale_d)), int(round(h * scale_h)), int(round(w * scale_w)), c,),
)

ishape, oshape = get_shape()
Expand Down Expand Up @@ -1691,13 +1681,7 @@ def fast_int8_intrinsic(self, target):

@tvm.testing.fixture
def assembly(
self,
target,
dtypes,
input_channels,
output_channels,
data_layout,
kernel_layout,
self, target, dtypes, input_channels, output_channels, data_layout, kernel_layout,
):
input_dtype, weight_dtype, output_dtype = dtypes

Expand Down Expand Up @@ -1759,9 +1743,7 @@ def assembly(
],
)
def test_uses_intrinsic(
self,
fast_int8_intrinsic,
assembly,
self, fast_int8_intrinsic, assembly,
):
assert fast_int8_intrinsic in assembly

Expand All @@ -1770,9 +1752,7 @@ def test_uses_intrinsic(
@tvm.testing.parametrize_targets(*supported_targets)
@pytest.mark.parametrize("dtypes", [("uint8", "uint8", "int32")])
def test_no_intrinsic(
self,
fast_int8_intrinsic,
assembly,
self, fast_int8_intrinsic, assembly,
):
assert fast_int8_intrinsic not in assembly

Expand Down Expand Up @@ -1944,5 +1924,55 @@ def _test_correlation(
)


@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_conv2d_rocm_sdot4():
    """Compile an int8 NCHW conv2d for ROCm with the dotprod feature enabled,
    check that the generated assembly uses the 4-way int8 dot-product
    instruction, and verify numerical results against the topi reference.
    """
    data_dtype = "int8"
    weight_dtype = "int8"
    out_dtype = "int32"
    d_shape = (1, 64, 56, 56)
    w_shape = (64, 64, 3, 3)
    strides = (1, 1)
    padding = (1, 1)

    data_var = relay.var("data", shape=d_shape, dtype=data_dtype)
    weight_var = relay.var("weight", shape=w_shape, dtype=weight_dtype)
    conv = relay.nn.conv2d(
        data=data_var,
        weight=weight_var,
        kernel_size=w_shape[2:],
        channels=w_shape[0],
        padding=padding,
        strides=strides,
        out_dtype=out_dtype,
    )
    mod = tvm.IRModule.from_expr(conv)

    data_np = np.random.uniform(1, 10, d_shape).astype("int8")
    weight_np = np.random.uniform(1, 10, size=w_shape).astype("int8")

    target = "rocm -mattr=+dotprod"
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params={"weight": weight_np})

    # The int8 fast path must lower to the GFX10 v_dot4 instruction.
    asm = lib.lib.imported_modules[0].get_source("asm")
    assert "v_dot4_i32_i8" in asm

    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()

    # int32 accumulation is exact, so an exact equality check is safe here.
    ref = tvm.topi.testing.conv2d_nchw_python(
        data_np.astype("int32"), weight_np.astype("int32"), strides, padding
    )
    np.testing.assert_equal(out, ref)


if __name__ == "__main__":
    # Forward the command line to pytest and propagate its exit status.
    raise SystemExit(pytest.main(sys.argv))

0 comments on commit dd956ec

Please sign in to comment.