
Commit

Fix partitioning for unsupported operation inside supported resnet/inception-like block & add test
Jorn Tuyls committed Apr 9, 2021
1 parent 2bbffc7 commit 485b7c1
Showing 10 changed files with 847 additions and 1,242 deletions.
15 changes: 15 additions & 0 deletions python/pyxir/_libpyxir.py
@@ -0,0 +1,15 @@
# Copyright 2021 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libpyxir import *
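
The shim makes the compiled libpyxir extension importable under the pyxir package namespace. A hedged usage sketch (whether a given symbol such as XGraph is exported depends on the extension build):

# Hypothetical consumer code: pull the native bindings through the
# package-level shim rather than importing libpyxir directly.
from pyxir import _libpyxir
# Every name the compiled extension exports is re-exported here, so e.g.
# _libpyxir.XGraph resolves if the extension defines XGraph (an
# assumption for illustration).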
@@ -123,7 +123,6 @@ def from_relay_to_xgraph(self,

# Possibly replace Input layers with CvxInput layers
xlayers = [net[op_id] for op_id in schedule]

xgraph = self.xgraph_factory.build_from_xlayer(
net=xlayers,
name='relay_xgraph',
2 changes: 1 addition & 1 deletion python/pyxir/graph/ops/l1_basic_nn.py
@@ -164,7 +164,7 @@ def concat(op_name: str, input_layers: List[XLayer], axis: int, **kwargs):
# TODO workaround for concatenating when batch is -1 and some other constant
if len(check) > 1 and -1 in check:
check.remove(-1)
assert i == axis or len(check) == 1
assert i == axis or len(check) == 1, "i: {0}, axis: {1}, check: {2}".format(i, axis, check)

shape = input_layers[0].shapes[:]
shape[axis] = sum([il.shapes[axis] for il in input_layers])
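
The concat output shape is the input shape with the concatenation axis summed across inputs; the check above additionally lets a -1 (dynamic batch) coexist with one constant value on non-concat axes. The same logic in isolation (a runnable sketch; names are illustrative):

def concat_shape(shapes, axis):
    # Mirror of the check above: non-concat axes must agree, except that a
    # -1 (unknown batch) may coexist with a single constant value.
    for i in range(len(shapes[0])):
        check = set(s[i] for s in shapes)
        if len(check) > 1 and -1 in check:
            check.remove(-1)
        assert i == axis or len(check) == 1, \
            "i: {0}, axis: {1}, check: {2}".format(i, axis, check)
    out = list(shapes[0])
    out[axis] = sum(s[axis] for s in shapes)
    return out

# Concatenating [-1, 32, 8, 8] and [-1, 64, 8, 8] on axis 1 gives [-1, 96, 8, 8].
assert concat_shape([[-1, 32, 8, 8], [-1, 64, 8, 8]], 1) == [-1, 96, 8, 8]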
44 changes: 25 additions & 19 deletions python/pyxir/graph/ops/l2_convolution.py
@@ -69,7 +69,7 @@ def batch_norm(
gamma_layer: XLayer,
beta_layer: XLayer,
axis: int,
epsilon: float,
epsilon: float = 1e-5,
**kwargs
) -> XLayer:
"""
@@ -136,13 +136,13 @@ def conv2d(
input_layer: XLayer,
weights_layer: XLayer,
kernel_size: List[int],
strides: List[int],
padding_hw: List[int],
dilation: List[int],
groups: int,
channels: int,
data_layout: str,
kernel_layout: str,
strides: List[int] = [1, 1],
padding_hw: List[int] = [0, 0, 0, 0],
dilation: List[int] = [1, 1],
groups: int = 1,
channels: int = None,
data_layout: str = "NCHW",
kernel_layout: str = "OIHW",
target_kernel_layout: str = "OIHW",
**kwargs
) -> XLayer:
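
With these defaults, a call site only has to name the op, the input and weight layers, and the kernel size; everything else falls back to a unit-stride, unpadded, undilated NCHW/OIHW configuration, with channels inferred from the weights. A hedged sketch (in_X and w_X are assumed, pre-built XLayer objects, not defined in this commit):

# Hypothetical call; in_X and w_X stand in for existing XLayer objects.
conv_X = conv2d(
    op_name="conv1",
    input_layer=in_X,
    weights_layer=w_X,
    kernel_size=[3, 3],
    # strides=[1, 1], padding_hw=[0, 0, 0, 0], dilation=[1, 1], groups=1,
    # channels=None (inferred from the weights), data_layout="NCHW" and
    # kernel_layout="OIHW" are all supplied by the new defaults.
)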
@@ -222,6 +222,7 @@ def conv2d(
logger.debug("-- channels: {}".format(channels))

assert channels is None or out_ch == channels
channels = out_ch if channels is None else channels

B = np.zeros([out_ch], dtype=np.float32)
data = ConvData(W, B)
@@ -230,7 +231,11 @@
insize = [input_layer.shapes[H_idx], input_layer.shapes[W_idx]]
batches = input_layer.shapes[0]
logger.debug("-- in shape: {}".format(input_layer.shapes))
assert input_layer.shapes[C_idx] == in_ch
# assert input_layer.shapes[C_idx] == in_ch, \
#     "Expected number of input channels (in_ch) was {0} but got {1}"\
#     .format(in_ch, input_layer.shapes[C_idx])

logger.debug("-- padding (t,b,l,r): {}".format((pad_ht, pad_hb, pad_wl, pad_wr)))

@@ -316,13 +321,13 @@ def conv2d_transpose(
input_layer: XLayer,
weights_layer: XLayer,
kernel_size: List[int],
strides: List[int],
padding_hw: List[int],
dilation: List[int],
groups: int,
channels: int,
data_layout: str,
kernel_layout: str,
strides: List[int] = [1, 1],
padding_hw: List[int] = [0, 0, 0, 0],
dilation: List[int] = [1, 1],
groups: int = 1,
channels: int = None,
data_layout: str = "NCHW",
kernel_layout: str = "OIHW",
target_kernel_layout: str = "OIHW",
**kwargs
) -> XLayer:
Expand Down Expand Up @@ -391,6 +396,7 @@ def conv2d_transpose(
logger.debug("-- channels: {}".format(channels))

assert channels is None or out_ch == channels
channels = out_ch if channels is None else channels

B = np.zeros([out_ch], dtype=np.float32)
data = ConvData(W, B)
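
conv2d_transpose receives the same defaults and the same channels inference. Its output size follows the inverse of the convolution formula; a reference sketch under the usual zero output-padding assumption (not code from this file):

def conv2d_transpose_out_size(insize, kernel, stride, pad_total, dilation=1):
    # Inverse of the conv2d floor formula, with output_padding taken as 0.
    k_eff = dilation * (kernel - 1) + 1
    return (insize - 1) * stride - pad_total + k_eff

# 56 -> 112 upsampling with a 4x4 kernel, stride 2, padding 1+1:
# (56 - 1) * 2 - 2 + 4 = 112.
assert conv2d_transpose_out_size(56, 4, 2, 2) == 112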
@@ -638,9 +644,9 @@ def pool2d(
input_layer: XLayer,
pool_type: str,
pool_size: List[int],
strides: List[int],
padding: List[int],
layout: str,
strides: List[int] = [1, 1],
padding: List[int] = [0, 0, 0, 0],
layout: str = "NCHW",
ceil_mode: bool = False,
count_include_pad: bool = False,
**kwargs
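
pool2d likewise defaults to unit strides, zero padding and NCHW. ceil_mode controls whether a partial last window at the border still produces an output element; a minimal sketch of that rounding difference (reference arithmetic, not this file's code):

import math

def pool2d_out_size(insize, pool, stride, pad_total, ceil_mode=False):
    # ceil_mode rounds the window count up, so a partial last window at
    # the border still contributes an output element.
    frac = (insize + pad_total - pool) / stride
    return (math.ceil(frac) if ceil_mode else math.floor(frac)) + 1

# 6 input, 3x3 pool, stride 2, no padding: floor gives 2, ceil gives 3.
assert pool2d_out_size(6, 3, 2, 0) == 2
assert pool2d_out_size(6, 3, 2, 0, ceil_mode=True) == 3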
45 changes: 20 additions & 25 deletions python/pyxir/graph/ops/l3_math_and_transformations.py
@@ -419,7 +419,7 @@ def take(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]
#############

@xop_register_factory('Transpose')
def transpose(op_name: str, input_layer: XLayer, axes: List[int], **kwargs):
def transpose(op_name: str, input_layer: XLayer, axes: List[int], internal=0, **kwargs):
"""
Create a Transpose XLayer
@@ -432,31 +432,26 @@ def transpose(op_name: str, input_layer: XLayer, axes: List[int], **kwargs):
input_layer: XLayer
The input layer to this scaling layer
"""
if 'Constant' in input_layer.type:
# precompute
X = input_layer._replace(
data=np.transpose(input_layer.data, tuple(axes))
)
else:
bottoms = [input_layer.name]
bottoms = [input_layer.name]

new_shape = TensorShape([input_layer.shapes[i] for i in axes])
new_shape = TensorShape([input_layer.shapes[i] for i in axes])

attrs = kwargs
attrs.update({
'axes': axes
})
attrs = kwargs
attrs.update({
'axes': axes
})

X = XLayer()
X = X._replace(
name=op_name,
type=['Transpose'],
shapes=new_shape,
sizes=new_shape.get_size(),
layer=[op_name],
tops=[],
bottoms=bottoms,
attrs=attrs,
targets=[]
)
X = XLayer()
X = X._replace(
name=op_name,
type=['Transpose'],
shapes=new_shape,
sizes=new_shape.get_size(),
layer=[op_name],
tops=[],
bottoms=bottoms,
attrs=attrs,
internal=internal,
targets=[]
)
return X
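
Since the constant pre-compute branch is gone, Transpose always materializes as its own XLayer, carrying the new internal flag. The shape rule it records is the usual axes permutation; a quick numpy check:

import numpy as np

# NHWC -> NCHW permutation: axes pick, per output position, which input
# axis to read from, so the new shape is [old_shape[i] for i in axes].
x = np.zeros((1, 224, 224, 3))
axes = (0, 3, 1, 2)
assert np.transpose(x, axes).shape == (1, 3, 224, 224)
assert [x.shape[i] for i in axes] == [1, 3, 224, 224]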
2 changes: 0 additions & 2 deletions python/pyxir/graph/optimization/conditions.py
@@ -15,8 +15,6 @@
"""
Module containing XLayer condition functions for graph optimization
passes
"""

import numpy as np
60 changes: 1 addition & 59 deletions python/pyxir/graph/optimization/optimizers/basic_optimizer.py
@@ -32,63 +32,12 @@ class XGraphBasicOptimizer(XGraphBaseOptimizer):
def __init__(self, xgraph, copy=False):
super(XGraphBasicOptimizer, self).__init__(xgraph, copy)

# 1. Merge transposes
# opt_pass = XGraphOptimizationPass(
# name='BasicOptimizationPass-1',
# output_png='after_basic_merge_transposes.png',
# repeat_until_stable=True
# )

# logger.info("Add MergeTransposes pass")
# opt_pass.add_optimization(
# condition_func=lambda bXs, X, tXs:
# all([tX.type[0] == 'Transpose' for tX in tXs]),
# opt_func=optimizations.merge_transposes,
# name='MergeTransposes'
# )

# self.add_optimization_pass(10, opt_pass)

# 2. Expand transposes
# opt_pass = XGraphOptimizationPass(
# name='BasicOptimizationPass-2',
# output_png='after_expand_transposes.png',
# repeat_until_stable=True
# )

# logger.info("Add ExpandTransposes pass")
# opt_pass.add_optimization(
# condition_func=lambda bXs, X, tXs:
# X.type[0] == 'Transpose',
# opt_func=optimizations.expand_transposes,
# name='ExpandTransposes'
# )

# self.add_optimization_pass(20, opt_pass)

# 2. Sweep transposes
# opt_pass = XGraphOptimizationPass(
# name='BasicOptimizationPass-2',
# output_png='after_basic_sweep_transposes.png',
# repeat_until_stable=True
# )

# logger.info("Add SweepTransposesFlow pass")
# opt_pass.add_optimization(
# condition_func=lambda bXs, X, tXs:
# all([bX.type[0] == 'Transpose' for bX in bXs]),
# opt_func=optimizations.sweep_transposes_flow,
# name='SweepTransposesFlowDirection',
# target=target
# )

# self.add_optimization_pass(20, opt_pass)

# 1.
opt_pass = XGraphOptimizationPass(
name='BasicOptimizationPass-1',
output_png='after_basic_optimizations.png'
)

logger.info("Add RemoveScalingBy1Layers pass")
opt_pass.add_optimization(
condition_func=lambda bXs, X, tXs:
@@ -129,11 +78,4 @@ def __init__(self, xgraph, copy=False):
opt_func=optimizations.merge_bias,
name='MergeBiasIntoConvDense'
)
# import pdb; pdb.set_trace()
# logger.info("Add TransformMulAndMaxIntoLeakyRelu pass")
# opt_pass.add_optimization(
# condition_func=lambda bXs, X, tXs: 'Maximum' in X.type,
# opt_func=optimizations.transform_mul_and_max_into_lrelu,
# name='TransformMulAndMaxIntoLeakyRelu'
# )
self.add_optimization_pass(10, opt_pass)
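
The surviving pattern for registering an optimization is visible above: build an XGraphOptimizationPass, attach (condition_func, opt_func) pairs, then register the pass with a priority. A hedged sketch of adding a further pass (MyCustomOptimization and my_opt_func are illustrative, not part of this commit):

# Hypothetical extra pass mirroring the registrations above; condition_func
# sees the bottom layers, the layer itself and the top layers, and the
# optimization only fires where it returns True.
opt_pass = XGraphOptimizationPass(
    name='MyCustomPass',
    output_png='after_my_custom_pass.png'
)
opt_pass.add_optimization(
    condition_func=lambda bXs, X, tXs: 'Transpose' in X.type,
    opt_func=my_opt_func,  # hypothetical optimization function
    name='MyCustomOptimization'
)
self.add_optimization_pass(20, opt_pass)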