Commit 781ebe1

updated format of two py files
Joe (Chien-Chun) Chou committed Oct 19, 2021
1 parent 5e77c54 commit 781ebe1
Showing 2 changed files with 100 additions and 22 deletions.
65 changes: 56 additions & 9 deletions python/tvm/relay/op/nn/nn.py
@@ -748,7 +748,14 @@ def log_softmax(data, axis=-1):
 
 
 def max_pool1d(
-    data, pool_size=(1,), strides=(1,), dilation=(1,), padding=(0,), layout="NCW", out_layout="", ceil_mode=False
+    data,
+    pool_size=(1,),
+    strides=(1,),
+    dilation=(1,),
+    padding=(0,),
+    layout="NCW",
+    out_layout="",
+    ceil_mode=False,
 ):
     r"""1D maximum pooling operator.
@@ -801,7 +808,9 @@ def max_pool1d(
     if isinstance(dilation, int):
         dilation = (dilation,)
     padding = get_pad_tuple1d(padding)
-    return _make.max_pool1d(data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode)
+    return _make.max_pool1d(
+        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
+    )
 
 
 def max_pool2d(
@@ -873,7 +882,9 @@ def max_pool2d(
     if isinstance(dilation, int):
         dilation = (dilation, dilation)
     padding = get_pad_tuple2d(padding)
-    return _make.max_pool2d(data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode)
+    return _make.max_pool2d(
+        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
+    )
 
 
 def max_pool3d(
@@ -938,7 +949,9 @@ def max_pool3d(
     if isinstance(dilation, int):
         dilation = (dilation, dilation, dilation)
     padding = get_pad_tuple3d(padding)
-    return _make.max_pool3d(data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode)
+    return _make.max_pool3d(
+        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
+    )
 
 
 def avg_pool1d(
@@ -1007,7 +1020,15 @@ def avg_pool1d(
         dilation = (dilation,)
     padding = get_pad_tuple1d(padding)
     return _make.avg_pool1d(
-        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode, count_include_pad
+        data,
+        pool_size,
+        strides,
+        dilation,
+        padding,
+        layout,
+        out_layout,
+        ceil_mode,
+        count_include_pad,
     )
 
 
@@ -1086,7 +1107,15 @@ def avg_pool2d(
         dilation = (dilation, dilation)
     padding = get_pad_tuple2d(padding)
     return _make.avg_pool2d(
-        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode, count_include_pad
+        data,
+        pool_size,
+        strides,
+        dilation,
+        padding,
+        layout,
+        out_layout,
+        ceil_mode,
+        count_include_pad,
     )
 
 
@@ -1157,7 +1186,15 @@ def avg_pool3d(
         dilation = (dilation, dilation, dilation)
     padding = get_pad_tuple3d(padding)
     return _make.avg_pool3d(
-        data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode, count_include_pad
+        data,
+        pool_size,
+        strides,
+        dilation,
+        padding,
+        layout,
+        out_layout,
+        ceil_mode,
+        count_include_pad,
     )
 
 
@@ -1206,7 +1243,9 @@ def max_pool2d_grad(
     result : tvm.relay.Expr
         The computed result.
     """
-    return _make.max_pool2d_grad(out_grad, data, pool_size, strides, padding, layout, out_layout, ceil_mode)
+    return _make.max_pool2d_grad(
+        out_grad, data, pool_size, strides, padding, layout, out_layout, ceil_mode
+    )
 
 
 def avg_pool2d_grad(
@@ -1259,7 +1298,15 @@ def avg_pool2d_grad(
         The computed result.
     """
     return _make.avg_pool2d_grad(
-        out_grad, data, pool_size, strides, padding, layout, out_layout, ceil_mode, count_include_pad
+        out_grad,
+        data,
+        pool_size,
+        strides,
+        padding,
+        layout,
+        out_layout,
+        ceil_mode,
+        count_include_pad,
     )
 
 
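The nn.py changes above are formatting only: each pooling wrapper keeps its signature and still forwards the same arguments to the corresponding _make.* builder. A minimal usage sketch of two of the reformatted wrappers (shapes and pooling parameters here are illustrative, not taken from this commit):

    import tvm
    from tvm import relay

    # Illustrative NCHW feature map; any 4-D input works the same way.
    x = relay.var("x", shape=(1, 16, 32, 32), dtype="float32")
    y = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), layout="NCHW")
    y = relay.nn.avg_pool2d(y, pool_size=(2, 2), layout="NCHW", count_include_pad=False)
    func = relay.Function(relay.analysis.free_vars(y), y)
    print(tvm.IRModule.from_expr(func))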
57 changes: 44 additions & 13 deletions tests/python/relay/test_pass_convert_op_layout.py
@@ -2054,8 +2054,15 @@ def convert_maxpool2d(attrs, inputs, tinfos, desired_layouts):
     def before():
         x = relay.var("x", shape=(1, 64, 56, 56))
         weight = relay.var("weight", shape=(64, 64, 3, 3))
-        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1),
-                            data_layout="NCHW", kernel_layout="OIHW")
+        y = relay.nn.conv2d(
+            x,
+            weight,
+            channels=64,
+            kernel_size=(3, 3),
+            padding=(1, 1),
+            data_layout="NCHW",
+            kernel_layout="OIHW",
+        )
         y = relay.nn.relu(y)
         y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW")
         y = relay.nn.batch_flatten(y)
@@ -2067,8 +2074,15 @@ def expected():
         weight = relay.var("weight", shape=(64, 64, 3, 3))
         x = relay.layout_transform(x, "NCHW", "NHWC")
         weight = relay.layout_transform(weight, "OIHW", "OHWI")
-        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1),
-                            data_layout="NHWC", kernel_layout="OHWI")
+        y = relay.nn.conv2d(
+            x,
+            weight,
+            channels=64,
+            kernel_size=(3, 3),
+            padding=(1, 1),
+            data_layout="NHWC",
+            kernel_layout="OHWI",
+        )
         y = relay.nn.relu(y)
         y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC", out_layout="NHWC")
         y = relay.layout_transform(y, "NHWC", "NCHW")
@@ -2078,10 +2092,7 @@ def expected():
 
     a = before()
     a = run_opt_pass(
-        a,
-        transform.ConvertLayout(
-            {"nn.conv2d": ["NHWC", "OHWI"], "nn.max_pool2d": ["NHWC"]}
-        )
+        a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"], "nn.max_pool2d": ["NHWC"]})
     )
     b = run_opt_pass(expected(), transform.InferType())
 
@@ -2145,7 +2156,10 @@ def expected():
         return y
 
     a = before()
-    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"], "nn.max_pool2d": ["NHWC"]}))
+    a = run_opt_pass(
+        a,
+        transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"], "nn.max_pool2d": ["NHWC"]}),
+    )
     b = run_opt_pass(expected(), transform.InferType())
 
     assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
@@ -2254,15 +2268,22 @@ def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout, max_pool_l
         y = relay.nn.relu(y)
         if max_pool_layout != layout_map["dst"]["data_layout"]:
             y = relay.layout_transform(y, layout_map["dst"]["data_layout"], max_pool_layout)
-        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=max_pool_layout, out_layout=max_pool_layout)
+        y = relay.nn.max_pool2d(
+            y, pool_size=(2, 2), layout=max_pool_layout, out_layout=max_pool_layout
+        )
         y = relay.cast(y, "int32")
         y = relay.nn.batch_flatten(y)
         y = relay.Function(analysis.free_vars(y), y)
         return y
 
     # NHWC -> NCHW
     a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
-    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"], "nn.max_pool2d": ["NHWC"]}))
+    a = run_opt_pass(
+        a,
+        transform.ConvertLayout(
+            {"nn.deformable_conv2d": ["NCHW", "default"], "nn.max_pool2d": ["NHWC"]}
+        ),
+    )
     # - in the before() func, its last argument "NHWC" is also the layout of max_pool
     b = run_opt_pass(
         # max_pool has its own layout argument
@@ -2273,7 +2294,12 @@ def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout, max_pool_l
 
     # NCHW -> NHWC
    a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
-    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"], "nn.max_pool2d": ["NCHW"]}))
+    a = run_opt_pass(
+        a,
+        transform.ConvertLayout(
+            {"nn.deformable_conv2d": ["NHWC", "default"], "nn.max_pool2d": ["NCHW"]}
+        ),
+    )
     # - in the before() func, its last argument "NCHW" is also the layout of max_pool
     b = run_opt_pass(
         # max_pool has its own layout argument
@@ -2334,7 +2360,12 @@ def expected():
         return relay.Function(analysis.free_vars(y), y)
 
     a = before()
-    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"], "nn.global_max_pool2d": ["NHWC"]}))
+    a = run_opt_pass(
+        a,
+        transform.ConvertLayout(
+            {"nn.conv2d": ["NCHW", "default"], "nn.global_max_pool2d": ["NHWC"]}
+        ),
+    )
     b = run_opt_pass(expected(), transform.InferType())
 
     assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
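The test changes above are likewise pure re-wrapping of the same calls. Below is a sketch of the ConvertLayout flow those tests exercise; the run_opt_pass helper is reconstructed from its usual definition in TVM's Relay test utilities, so treat it as an assumption rather than a verbatim copy of this file's helper:

    import tvm
    from tvm import relay
    from tvm.relay import analysis, transform

    def run_opt_pass(expr, passes):
        # Wrap the expression in a module, run the passes, and return "main".
        passes = passes if isinstance(passes, list) else [passes]
        mod = tvm.IRModule.from_expr(expr)
        seq = tvm.transform.Sequential(passes)
        with tvm.transform.PassContext(opt_level=3):
            mod = seq(mod)
        return mod["main"]

    x = relay.var("x", shape=(1, 64, 56, 56))
    weight = relay.var("weight", shape=(64, 64, 3, 3))
    y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1),
                        data_layout="NCHW", kernel_layout="OIHW")
    y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW")
    f = relay.Function(analysis.free_vars(y), y)

    # Convert conv2d to NHWC/OHWI and give max_pool2d its own NHWC layout,
    # mirroring the dictionaries used in the tests above.
    a = run_opt_pass(
        f,
        transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"], "nn.max_pool2d": ["NHWC"]}),
    )
    print(a)

ConvertLayout rewrites the graph to the requested layouts and inserts layout_transform ops at the boundaries, which is what the expected() functions in the tests above encode.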
