diff --git a/python/pyxir/graph/ops/l2_convolution.py b/python/pyxir/graph/ops/l2_convolution.py index e41f8e2..971facf 100644 --- a/python/pyxir/graph/ops/l2_convolution.py +++ b/python/pyxir/graph/ops/l2_convolution.py @@ -28,11 +28,12 @@ from pyxir.shapes import TensorShape -from ..layer.xlayer import defaultXLayer, XLayer, BatchData, \ - ConvData, ScaleData +from ..layer.xlayer import defaultXLayer, XLayer, BatchData, ConvData, ScaleData from ..layer.xlayer_factory import xop_register_factory, xop_register -from ..xop_registry import xop_register_op_layout_transform,\ - xop_register_op_transpose_transform +from ..xop_registry import ( + xop_register_op_layout_transform, + xop_register_op_transpose_transform, +) logger = logging.getLogger("pyxir") @@ -41,31 +42,36 @@ # Flatten # ########### -@xop_register('Flatten') -def batch_flatten(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]: + +@xop_register("Flatten") +def batch_flatten( + attrs: Dict[str, Any], in_xlayers: List[XLayer] +) -> Dict[str, List[int]]: """Return Batch Flatten registration information (shape)""" assert len(in_xlayers) == 1, "Batch Flatten expects one input layer" flattened_shape = TensorShape( - [list(in_xlayers[0].shapes)[0]] + - [int(np.prod(list(in_xlayers[0].shapes)[1:]))] + [list(in_xlayers[0].shapes)[0]] + [int(np.prod(list(in_xlayers[0].shapes)[1:]))] ) - return {'shape': flattened_shape} + return {"shape": flattened_shape} ############# # BatchNorm # ############# -@xop_register_factory('BatchNorm') -def batch_norm(op_name: str, - input_layer: XLayer, - mean_layer: XLayer, - variance_layer: XLayer, - gamma_layer: XLayer, - beta_layer: XLayer, - axis: int, - epsilon: float, - **kwargs): + +@xop_register_factory("BatchNorm") +def batch_norm( + op_name: str, + input_layer: XLayer, + mean_layer: XLayer, + variance_layer: XLayer, + gamma_layer: XLayer, + beta_layer: XLayer, + axis: int, + epsilon: float, + **kwargs +) -> XLayer: """ Create a batch normalization parameters layer @@ -79,21 +85,22 @@ def batch_norm(op_name: str, bottoms = [input_layer.name] attrs = kwargs - attrs.update({ - 'epsilon': epsilon, - 'axis': axis, - }) + attrs.update( + { + "epsilon": epsilon, + "axis": axis, + } + ) mean, variance = mean_layer.data[0], variance_layer.data[0] gamma, beta = gamma_layer.data[0], beta_layer.data[0] assert mean.shape == variance.shape - bn_data = BatchData(mu=mean, sigma_square=variance, - gamma=gamma, beta=beta) + bn_data = BatchData(mu=mean, sigma_square=variance, gamma=gamma, beta=beta) X = XLayer() X = X._replace( name=op_name, - type=['BatchNorm'], + type=["BatchNorm"], shapes=input_layer.shapes[:], sizes=input_layer.sizes[:], data=bn_data, @@ -101,40 +108,44 @@ def batch_norm(op_name: str, tops=[], bottoms=bottoms, attrs=attrs, - targets=[] + targets=[], ) return X -@xop_register_op_transpose_transform('BatchNorm') +@xop_register_op_transpose_transform("BatchNorm") def batchnorm_transpose_transform(X: XLayer, axes: List[int]) -> None: - """ Transform batch normalization layer with transpose according to - provided axes """ + """Transform batch normalization layer with transpose according to + provided axes""" new_shape = TensorShape([X.shapes[i] for i in axes]) X.shapes = new_shape - X.attrs['axis'] = axes.index(X.attrs['axis']) + X.attrs["axis"] = axes.index(X.attrs["axis"]) ########## # Conv2D # ########## -@xop_register_factory('Convolution') -def conv2d(op_name: str, - input_layer: XLayer, - weights_layer: XLayer, - kernel_size: List[int], - strides: List[int], - padding_hw: 
List[int], - dilation: List[int], - groups: int, - channels: int, - data_layout: str, - kernel_layout: str, - **kwargs): + +@xop_register_factory("Convolution") +def conv2d( + op_name: str, + input_layer: XLayer, + weights_layer: XLayer, + kernel_size: List[int], + strides: List[int], + padding_hw: List[int], + dilation: List[int], + groups: int, + channels: int, + data_layout: str, + kernel_layout: str, + target_kernel_layout: str = "OIHW", + **kwargs +) -> XLayer: """ Create a conv2d XLayer @@ -142,6 +153,10 @@ def conv2d(op_name: str, --------- op_name: str The name of this conv2d layer operation + input_layer: XLayer + The input layer to this conv2d layer + weights_layer: XLayer + The weights input layer to this conv2d layer kernel_size: List[int] The size of the kernel windows strides: List[int] @@ -160,20 +175,18 @@ def conv2d(op_name: str, The layout of the conv2d layer input (`NCHW` or `NHWC`) kernel_layout: str The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`) - input_layer: XLayer - The input layer to this conv2d layer - weights_layer: XLayer - The weights input layer to this conv2d layer + target_kernel_layout: str + The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`) """ - assert 'Constant' in weights_layer.type + assert "Constant" in weights_layer.type assert len(kernel_size) == 2 assert len(dilation) == 2 assert len(strides) == 2 assert len(padding_hw) in [2, 4] - layout_idx = tuple([data_layout.index(e) for e in 'NCHW']) + layout_idx = tuple([data_layout.index(e) for e in "NCHW"]) layout_idx_transpose = tuple(["NCHW".index(e) for e in data_layout]) B_idx, C_idx, H_idx, W_idx = layout_idx @@ -182,14 +195,16 @@ def conv2d(op_name: str, logger.debug("-- Conv2D Kernel layout: {}".format(kernel_layout)) logger.debug("-- Conv2D W shape: {}".format(weights_layer.data[0].shape)) - if len(kernel_layout) != 4 or \ - sorted(kernel_layout) != ['H', 'I', 'O', 'W']: - raise NotImplementedError("Unsupported kernel layout: {} for" - " convolution: {}, should be a permutation" - " of `OIHW`" - .format(kernel_layout, op_name)) - transpose_axes = tuple([kernel_layout.index(e) for e in 'OIHW']) + if len(kernel_layout) != 4 or sorted(kernel_layout) != ["H", "I", "O", "W"]: + raise NotImplementedError( + "Unsupported kernel layout: {} for" + " convolution: {}, should be a permutation" + " of `OIHW`".format(kernel_layout, op_name) + ) + transpose_axes = tuple([kernel_layout.index(e) for e in target_kernel_layout]) W = np.transpose(weights_layer.data[0], transpose_axes) + kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"]) + kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx if len(padding_hw) == 4: pad_ht, pad_hb, pad_wl, pad_wr = padding_hw @@ -197,15 +212,16 @@ def conv2d(op_name: str, pad_ht, pad_wl = padding_hw pad_hb, pad_wr = padding_hw else: - raise ValueError("'padding_hw' argument should be a list of length 2" - " but got: {}".format(len(padding_hw))) + raise ValueError( + "'padding_hw' argument should be a list of length 2" + " but got: {}".format(len(padding_hw)) + ) - # W is now in OIHW shape - in_ch, out_ch = W.shape[1] * groups, W.shape[0] + in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx] logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch)) logger.debug("-- channels: {}".format(channels)) - assert(channels is None or out_ch == channels) + assert channels is None or out_ch == channels B = np.zeros([out_ch], dtype=np.float32) data = ConvData(W, B) @@ -216,21 +232,27 @@ def conv2d(op_name: str, 
logger.debug("-- in shape: {}".format(input_layer.shapes)) assert input_layer.shapes[C_idx] == in_ch - logger.debug("-- padding (t,b,l,r): {}" - .format((pad_ht, pad_hb, pad_wl, pad_wr))) + logger.debug("-- padding (t,b,l,r): {}".format((pad_ht, pad_hb, pad_wl, pad_wr))) - # TODO dilation - out_h = \ - int((insize[0] + pad_ht + pad_hb - kernel_size[0]) / strides[0] + 1) - out_w = \ - int((insize[1] + pad_wl + pad_wr - kernel_size[1]) / strides[1] + 1) + out_h = int( + (insize[0] + pad_ht + pad_hb - dilation[0] * (kernel_size[0] - 1) - 1) + / strides[0] + + 1 + ) + out_w = int( + (insize[1] + pad_wl + pad_wr - dilation[1] * (kernel_size[1] - 1) - 1) + / strides[1] + + 1 + ) - out_shape = TensorShape([[batches, out_ch, out_h, out_w][i] for i in layout_idx_transpose]) + out_shape = TensorShape( + [[batches, out_ch, out_h, out_w][i] for i in layout_idx_transpose] + ) padding_hh = [pad_ht, pad_hb] padding_ww = [pad_wl, pad_wr] - if data_layout == 'NCHW': + if data_layout == "NCHW": granular_padding = [[0, 0], [0, 0], padding_hh, padding_ww] else: granular_padding = [[0, 0], padding_hh, padding_ww, [0, 0]] @@ -238,22 +260,24 @@ def conv2d(op_name: str, logger.debug("-- out shape: {}".format(out_shape)) attrs = kwargs - attrs.update({ - 'padding': granular_padding, - 'data_layout': data_layout, - 'kernel_layout': 'OIHW', - 'shape': out_shape.tolist(), - 'kernel_size': kernel_size, - 'strides': strides, - 'groups': groups, - 'dilation': dilation, - 'channels': [in_ch, out_ch] - }) + attrs.update( + { + "padding": granular_padding, + "data_layout": data_layout, + "kernel_layout": target_kernel_layout, + "shape": out_shape.tolist(), + "kernel_size": kernel_size, + "strides": strides, + "groups": groups, + "dilation": dilation, + "channels": [in_ch, out_ch], + } + ) X = XLayer() X = X._replace( name=op_name, - type=['Convolution'], + type=["Convolution"], shapes=out_shape, sizes=out_shape.get_size(), data=data, @@ -261,22 +285,23 @@ def conv2d(op_name: str, tops=[], bottoms=bottoms, attrs=attrs, - targets=[]) + targets=[], + ) return X -@xop_register_op_layout_transform('Convolution') +@xop_register_op_layout_transform("Convolution") def conv2d_layout_transform(X: XLayer, target_layout: str) -> None: """ Transform layout of provided XLayer to target layout """ - layout = X.attrs['data_layout'] + layout = X.attrs["data_layout"] axes_transpose = [layout.index(e) for e in target_layout] # TODO: strides, dilations - X.attrs['padding'] = [X.attrs['padding'][i] for i in axes_transpose] - X.attrs['data_layout'] = target_layout + X.attrs["padding"] = [X.attrs["padding"][i] for i in axes_transpose] + X.attrs["data_layout"] = target_layout X.shapes[:] = TensorShape([X.shapes[i] for i in axes_transpose]) @@ -284,19 +309,23 @@ def conv2d_layout_transform(X: XLayer, target_layout: str) -> None: # Conv2DTranspose # ################### -@xop_register_factory('Conv2DTranspose') -def conv2d_transpose(op_name: str, - input_layer: XLayer, - weights_layer: XLayer, - kernel_size: List[int], - strides: List[int], - padding_hw: List[int], - dilation: List[int], - groups: int, - channels: int, - data_layout: str, - kernel_layout: str, - **kwargs) -> XLayer: + +@xop_register_factory("Conv2DTranspose") +def conv2d_transpose( + op_name: str, + input_layer: XLayer, + weights_layer: XLayer, + kernel_size: List[int], + strides: List[int], + padding_hw: List[int], + dilation: List[int], + groups: int, + channels: int, + data_layout: str, + kernel_layout: str, + target_kernel_layout: str = "OIHW", + **kwargs +) -> XLayer: """ 
Create a Conv2DTranspose XLayer @@ -304,6 +333,10 @@ def conv2d_transpose(op_name: str, --------- op_name: str The name of this conv2d layer operation + input_layer: XLayer + The input layer to this conv2d layer + weights_layer: XLayer + The weights input layer to this conv2d layer kernel_size: List[int] The size of the kernel windows strides: List[int] @@ -322,31 +355,24 @@ def conv2d_transpose(op_name: str, The layout of the conv2d layer input (`NCHW` or `NHWC`) kernel_layout: str The layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`) - input_layer: XLayer - The input layer to this conv2d layer - weights_layer: XLayer - The weights input layer to this conv2d layer + target_kernel_layout: str + The target layout of the conv2d layer kernel (`OIHW`, `HWIO` or `OHWI`) """ bottoms = [input_layer.name] logger.debug("-- Conv2DTranspose Kernel layout: {}".format(kernel_layout)) - logger.debug("-- Conv2DTranspose W shape: {}" - .format(weights_layer.data[0].shape)) - - # Convert kernel to 'OIHW' layout - if kernel_layout == 'OIHW': - W = weights_layer.data[0] - elif kernel_layout == 'HWIO': - W = np.transpose(weights_layer.data[0], (3, 2, 0, 1)) - elif kernel_layout == 'IOHW': - W = np.transpose(weights_layer.data[0], (1, 0, 2, 3)) - elif kernel_layout == 'OHWI': - W = np.transpose(weights_layer.data[0], (0, 3, 1, 2)) - else: - raise NotImplementedError("Unsupported kernel layout: {} for" - " convolution: {}, should be one of `OIHW`" - ", `HWIO`, `IOHW` or `OHWI`." - .format(kernel_layout, op_name)) + logger.debug("-- Conv2DTranspose W shape: {}".format(weights_layer.data[0].shape)) + + if len(kernel_layout) != 4 or sorted(kernel_layout) != ["H", "I", "O", "W"]: + raise NotImplementedError( + "Unsupported kernel layout: {} for" + " convolution: {}, should be a permutation" + " of `OIHW`".format(kernel_layout, op_name) + ) + transpose_axes = tuple([kernel_layout.index(e) for e in target_kernel_layout]) + W = np.transpose(weights_layer.data[0], transpose_axes) + kernel_layout_idx = tuple([target_kernel_layout.index(e) for e in "OIHW"]) + kO_idx, kI_idx, kH_idx, kW_idx = kernel_layout_idx assert len(padding_hw) in [2, 4] if len(padding_hw) == 4: @@ -355,11 +381,12 @@ def conv2d_transpose(op_name: str, pad_ht, pad_wl = padding_hw pad_hb, pad_wr = padding_hw else: - raise ValueError("'padding_hw' argument should be a list of length 2" - " but got: {}".format(len(padding_hw))) + raise ValueError( + "'padding_hw' argument should be a list of length 2" + " but got: {}".format(len(padding_hw)) + ) - # W is now in OIHW shape - in_ch, out_ch = W.shape[1] * groups, W.shape[0] + in_ch, out_ch = W.shape[kI_idx] * groups, W.shape[kO_idx] logger.debug("-- in_ch: {}, out_ch: {}".format(in_ch, out_ch)) logger.debug("-- channels: {}".format(channels)) @@ -375,50 +402,57 @@ def conv2d_transpose(op_name: str, logger.debug("{} {}".format(input_layer.shapes, in_ch)) assert input_layer.shapes[1] == in_ch - if padding_hw[0] == (kernel_size[0] - strides[0]) / 2 and\ - padding_hw[1] == (kernel_size[1] - strides[1]) / 2: - padding_type = 'SAME' + if ( + padding_hw[0] == (kernel_size[0] - strides[0]) / 2 + and padding_hw[1] == (kernel_size[1] - strides[1]) / 2 + ): + padding_type = "SAME" elif padding_hw[0] == 0 and padding_hw[1] == 0: - padding_type = 'VALID' + padding_type = "VALID" else: - raise NotImplementedError("Unsupported padding for Conv2DTranspose" - " Only Tensorflow padding 'SAME' and 'VALID'" - " are supported but got: {} which does not" - " translate to 'SAME' == [{}, {}] or 'VALID'" - " == [0, 0]" - 
.format(padding_hw, - (kernel_size[0] - strides[0]) / 2, - (kernel_size[1] - strides[1]) / 2)) - - if padding_type == 'SAME': + raise NotImplementedError( + "Unsupported padding for Conv2DTranspose" + " Only Tensorflow padding 'SAME' and 'VALID'" + " are supported but got: {} which does not" + " translate to 'SAME' == [{}, {}] or 'VALID'" + " == [0, 0]".format( + padding_hw, + (kernel_size[0] - strides[0]) / 2, + (kernel_size[1] - strides[1]) / 2, + ) + ) + + if padding_type == "SAME": out_h = insize[0] * strides[0] out_w = insize[1] * strides[1] - elif padding_type == 'VALID': + elif padding_type == "VALID": out_h = (insize[0] - 1) * strides[0] + kernel_size[0] out_w = (insize[1] - 1) * strides[1] + kernel_size[1] out_shape = TensorShape([batches, out_ch, out_h, out_w]) padding = [[0, 0], [0, 0], [pad_ht, pad_hb], [pad_wl, pad_wr]] - padding = [padding['NCHW'.index(i)] for i in data_layout] + padding = [padding["NCHW".index(i)] for i in data_layout] attrs = kwargs - attrs.update({ - 'padding': padding, - 'data_layout': data_layout, - 'kernel_layout': 'OIHW', - 'shape': out_shape.tolist(), - 'kernel_size': kernel_size, - 'strides': strides, - 'groups': groups, - 'dilation': dilation, - 'channels': [in_ch, out_ch] - }) + attrs.update( + { + "padding": padding, + "data_layout": data_layout, + "kernel_layout": "OIHW", + "shape": out_shape.tolist(), + "kernel_size": kernel_size, + "strides": strides, + "groups": groups, + "dilation": dilation, + "channels": [in_ch, out_ch], + } + ) X = XLayer() X = X._replace( name=op_name, - type=['Conv2DTranspose'], + type=["Conv2DTranspose"], shapes=out_shape, sizes=out_shape.get_size(), data=data, @@ -426,21 +460,22 @@ def conv2d_transpose(op_name: str, tops=[], bottoms=bottoms, attrs=attrs, - targets=[]) + targets=[], + ) return X -@xop_register_op_layout_transform('Conv2DTranspose') +@xop_register_op_layout_transform("Conv2DTranspose") def conv2d_transpose_layout_transform(X: XLayer, target_layout: str) -> None: """Transform layout of provided XLayer to target layout""" - layout = X.attrs['data_layout'] + layout = X.attrs["data_layout"] axes_transpose = [layout.index(e) for e in target_layout] # TODO: strides, dilations - X.attrs['padding'] = [X.attrs['padding'][i] for i in axes_transpose] - X.attrs['data_layout'] = target_layout + X.attrs["padding"] = [X.attrs["padding"][i] for i in axes_transpose] + X.attrs["data_layout"] = target_layout X.shapes = TensorShape([X.shapes[i] for i in axes_transpose]) @@ -448,12 +483,11 @@ def conv2d_transpose_layout_transform(X: XLayer, target_layout: str) -> None: # Global Pooling # ################## -@xop_register_factory('GlobalPooling') -def global_pool2d(op_name: str, - input_layer: XLayer, - pool_type: str, - layout: str, - **kwargs) -> XLayer: + +@xop_register_factory("GlobalPooling") +def global_pool2d( + op_name: str, input_layer: XLayer, pool_type: str, layout: str, **kwargs +) -> XLayer: """ Create a global pooling XLayer @@ -469,9 +503,11 @@ def global_pool2d(op_name: str, The input layer to this pooling layer """ - if pool_type not in ['Max', 'Avg']: - raise NotImplementedError("Invalid pooling type: {}, can either be" - " `Max` or `Avg`.".format(pool_type)) + if pool_type not in ["Max", "Avg"]: + raise NotImplementedError( + "Invalid pooling type: {}, can either be" + " `Max` or `Avg`.".format(pool_type) + ) # NCHW by design insize = [input_layer.shapes[2], input_layer.shapes[3]] @@ -484,31 +520,36 @@ def global_pool2d(op_name: str, out_h, out_w = 1, 1 attrs = kwargs - attrs.update({ - 'padding': [[0, 0], 
[0, 0], [0, 0], [0, 0]], - 'insize': insize, - 'outsize': [out_h, out_w], - 'data_layout': layout, - 'strides': strides, - 'kernel_size': pool_size, - 'pool_type': pool_type, - # 'channels': [channels, channels] - }) - out_shape = TensorShape([batches, channels, out_h, out_w] - if layout == 'NCHW' - else [batches, out_h, out_w, channels]) + attrs.update( + { + "padding": [[0, 0], [0, 0], [0, 0], [0, 0]], + "insize": insize, + "outsize": [out_h, out_w], + "data_layout": layout, + "strides": strides, + "kernel_size": pool_size, + "pool_type": pool_type, + # 'channels': [channels, channels] + } + ) + out_shape = TensorShape( + [batches, channels, out_h, out_w] + if layout == "NCHW" + else [batches, out_h, out_w, channels] + ) X = XLayer() X = X._replace( name=op_name, - type=['Pooling'], + type=["Pooling"], shapes=out_shape, sizes=out_shape.get_size(), attrs=attrs, layer=[op_name], tops=[], bottoms=[input_layer.name], - targets=[]) + targets=[], + ) return X @@ -516,12 +557,11 @@ def global_pool2d(op_name: str, # Pad # ####### -@xop_register_factory('Pad') -def pad(op_name: str, - input_layer: XLayer, - padding: List[int], - pad_value: float, - **kwargs) -> XLayer: + +@xop_register_factory("Pad") +def pad( + op_name: str, input_layer: XLayer, padding: List[int], pad_value: float, **kwargs +) -> XLayer: """ Create a padding XLayer @@ -539,69 +579,72 @@ def pad(op_name: str, The input layer to this pooling layer """ if pad_value != 0: - raise NotImplementedError("Unsupported padding value: {}, only 0 is" - " supported for now.".format(pad_value)) + raise NotImplementedError( + "Unsupported padding value: {}, only 0 is" + " supported for now.".format(pad_value) + ) if not len(input_layer.shapes) == 4: - raise NotImplementedError("Padding layer only supported after layer in" - " `NCHW` or `NHWC` format, but found layer" - " with {} dims" - .format(len(input_layer.shapes))) + raise NotImplementedError( + "Padding layer only supported after layer in" + " `NCHW` or `NHWC` format, but found layer" + " with {} dims".format(len(input_layer.shapes)) + ) - unpadded_dims = [[0, 0]] * len(input_layer.shapes[len(padding):]) + unpadded_dims = [[0, 0]] * len(input_layer.shapes[len(padding) :]) padding = unpadded_dims + [list(pad) for pad in padding] - shape = TensorShape([s + p[0] + p[1] - for s, p in zip(input_layer.shapes, padding)]) + shape = TensorShape([s + p[0] + p[1] for s, p in zip(input_layer.shapes, padding)]) logger.debug("-- Pad shape: {}".format(shape)) attrs = kwargs - attrs.update({ - 'padding': padding - }) + attrs.update({"padding": padding}) X = XLayer() X = X._replace( name=op_name, - type=['Pad'], + type=["Pad"], shapes=shape, sizes=shape.get_size(), attrs=attrs, layer=[op_name], tops=[], bottoms=[input_layer.name], - targets=[] + targets=[], ) return X -@xop_register_op_transpose_transform('Pad') +@xop_register_op_transpose_transform("Pad") def padding_transpose_transform(X: XLayer, axes: List[int]) -> None: """ Transform padding layer with transpose according to provided axes """ new_shape = [X.shapes[i] for i in axes] X.shapes = TensorShape(new_shape) - new_padding = [X.attrs['padding'][i] for i in axes] + new_padding = [X.attrs["padding"][i] for i in axes] # X.data[:] = new_padding - X.attrs['padding'] = new_padding + X.attrs["padding"] = new_padding ########### # Pooling # ########### -@xop_register_factory('Pooling') -def pool2d(op_name: str, - input_layer: XLayer, - pool_type: str, - pool_size: List[int], - strides: List[int], - padding: List[int], - layout: str, - ceil_mode: bool = 
False, - count_include_pad: bool = False, - **kwargs) -> XLayer: + +@xop_register_factory("Pooling") +def pool2d( + op_name: str, + input_layer: XLayer, + pool_type: str, + pool_size: List[int], + strides: List[int], + padding: List[int], + layout: str, + ceil_mode: bool = False, + count_include_pad: bool = False, + **kwargs +) -> XLayer: """ Create a pooling XLayer @@ -627,35 +670,47 @@ def pool2d(op_name: str, input_layer: XLayer The input layer to this pooling layer """ - if layout not in ['NCHW', 'NHWC']: - raise ValueError("Unsupported layout: {}, supported layouts are" - "NCHW and NHWC".format(layout)) + if layout not in ["NCHW", "NHWC"]: + raise ValueError( + "Unsupported layout: {}, supported layouts are" + "NCHW and NHWC".format(layout) + ) - if pool_type not in ['Max', 'Avg']: - raise NotImplementedError("Invalid pooling type: {}, can either be" - " `Max` or `Avg`.".format(pool_type)) + if pool_type not in ["Max", "Avg"]: + raise NotImplementedError( + "Invalid pooling type: {}, can either be" + " `Max` or `Avg`.".format(pool_type) + ) - def valid(x, k, p1, p2, s): return math.floor((x+p1+p2-k)/s) + 1 + def valid(x, k, p1, p2, s): + return math.floor((x + p1 + p2 - k) / s) + 1 - def full(x, k, p1, p2, s): return math.ceil((x+p1+p2-k)/s) + 1 + def full(x, k, p1, p2, s): + return math.ceil((x + p1 + p2 - k) / s) + 1 # TODO: this is very similar as for NNVM operators -> merge if len(padding) == 4: # top bottom left right = h_before h_after w_before w_after - full_paddings = \ - [[0, 0], [0, 0], [padding[0], padding[2]], - [padding[1], padding[3]]] + full_paddings = [ + [0, 0], + [0, 0], + [padding[0], padding[2]], + [padding[1], padding[3]], + ] elif len(padding) == 2: - full_paddings = \ - [[0, 0], [0, 0], [padding[0], padding[0]], - [padding[1], padding[1]]] + full_paddings = [ + [0, 0], + [0, 0], + [padding[0], padding[0]], + [padding[1], padding[1]], + ] elif len(padding) == 1: - full_paddings = [[0, 0], [0, 0], [padding, padding], - [padding, padding]] + full_paddings = [[0, 0], [0, 0], [padding, padding], [padding, padding]] else: - raise ValueError("Invalid padding size passed by Relay operator, " - " Sizes of 1, 2 and 4 are supported but not {}" - .format(len(padding))) + raise ValueError( + "Invalid padding size passed by Relay operator, " + " Sizes of 1, 2 and 4 are supported but not {}".format(len(padding)) + ) # if full_paddings[2][0] != full_paddings[2][1] \ # or full_paddings[3][0] != full_paddings[3][1]: @@ -663,10 +718,12 @@ def full(x, k, p1, p2, s): return math.ceil((x+p1+p2-k)/s) + 1 # "Padding will be symmetrized for running on FPGA." 
# .format(op_name)) - padding = [min(full_paddings[2][0], full_paddings[2][1]), - min(full_paddings[3][0], full_paddings[3][1])] + padding = [ + min(full_paddings[2][0], full_paddings[2][1]), + min(full_paddings[3][0], full_paddings[3][1]), + ] - if layout == 'NCHW': + if layout == "NCHW": insize = [input_layer.shapes[2], input_layer.shapes[3]] batches, channels = input_layer.shapes[0], input_layer.shapes[1] else: @@ -679,57 +736,71 @@ def full(x, k, p1, p2, s): return math.ceil((x+p1+p2-k)/s) + 1 calc_func = full if ceil_mode else valid outsize = [ - calc_func(insize[1], pool_size[1], full_paddings[3][0], - full_paddings[3][1], strides[1]), - calc_func(insize[0], pool_size[0], full_paddings[2][0], - full_paddings[2][1], strides[0]) + calc_func( + insize[1], + pool_size[1], + full_paddings[3][0], + full_paddings[3][1], + strides[1], + ), + calc_func( + insize[0], + pool_size[0], + full_paddings[2][0], + full_paddings[2][1], + strides[0], + ), ] attrs = kwargs - attrs.update({ - 'type': pool_type, - 'padding': full_paddings, - 'strides': strides, # HW - 'kernel_size': pool_size, # HW - 'insize': insize, # HW - 'outsize': [outsize[1], outsize[0]], # HW - 'data_layout': layout, - 'pool_type': pool_type - }) - if pool_type == 'Avg': - attrs['count_include_pad'] = count_include_pad + attrs.update( + { + "type": pool_type, + "padding": full_paddings, + "strides": strides, # HW + "kernel_size": pool_size, # HW + "insize": insize, # HW + "outsize": [outsize[1], outsize[0]], # HW + "data_layout": layout, + "pool_type": pool_type, + } + ) + if pool_type == "Avg": + attrs["count_include_pad"] = count_include_pad out_h, out_w = outsize[1], outsize[0] - out_shape = TensorShape([batches, channels, out_h, out_w] - if layout == 'NCHW' - else [batches, out_h, out_w, channels]) + out_shape = TensorShape( + [batches, channels, out_h, out_w] + if layout == "NCHW" + else [batches, out_h, out_w, channels] + ) X = XLayer() X = X._replace( name=op_name, - type=['Pooling'], + type=["Pooling"], shapes=out_shape, sizes=out_shape.get_size(), attrs=attrs, layer=[op_name], tops=[], bottoms=[input_layer.name], - targets=[] + targets=[], ) return X -@xop_register_op_layout_transform('Pooling') +@xop_register_op_layout_transform("Pooling") def pooling_layout_transform(X: XLayer, target_layout: str) -> None: """ Transform layout of provided XLayer to target layout """ - layout = X.attrs['data_layout'] + layout = X.attrs["data_layout"] axes_transpose = [layout.index(e) for e in target_layout] # TODO: strides, dilations - X.attrs['padding'] = [X.attrs['padding'][i] for i in axes_transpose] - X.attrs['data_layout'] = target_layout + X.attrs["padding"] = [X.attrs["padding"][i] for i in axes_transpose] + X.attrs["data_layout"] = target_layout X.shapes = TensorShape([X.shapes[i] for i in axes_transpose]) @@ -737,8 +808,11 @@ def pooling_layout_transform(X: XLayer, target_layout: str) -> None: # Upsampling2D # ################ -@xop_register('Upsampling2D') -def upsampling2d(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, List[int]]: + +@xop_register("Upsampling2D") +def upsampling2d( + attrs: Dict[str, Any], in_xlayers: List[XLayer] +) -> Dict[str, List[int]]: """ Create 2D Upsampling XLayer @@ -759,35 +833,35 @@ def upsampling2d(attrs: Dict[str, Any], in_xlayers: List[XLayer]) -> Dict[str, L """ assert len(in_xlayers) == 1 - assert 'scale_h' in attrs - assert 'scale_w' in attrs - assert 'data_layout' in attrs - assert 'method' in attrs - if 'align_corners' not in attrs: - attrs['align_corners'] = False + assert 
"scale_h" in attrs + assert "scale_w" in attrs + assert "data_layout" in attrs + assert "method" in attrs + if "align_corners" not in attrs: + attrs["align_corners"] = False - scale_h = attrs['scale_h'] - scale_w = attrs['scale_w'] + scale_h = attrs["scale_h"] + scale_w = attrs["scale_w"] - layout = attrs['data_layout'] - assert sorted(layout) == ['C', 'H', 'N', 'W'] + layout = attrs["data_layout"] + assert sorted(layout) == ["C", "H", "N", "W"] - h_idx = layout.index('H') - w_idx = layout.index('W') + h_idx = layout.index("H") + w_idx = layout.index("W") shape = in_xlayers[0].shapes[:] shape[h_idx] = int(shape[h_idx] * scale_h) shape[w_idx] = int(shape[w_idx] * scale_w) - return {'shape': shape} + return {"shape": shape} -@xop_register_op_layout_transform('Upsampling2D') +@xop_register_op_layout_transform("Upsampling2D") def upsampling2d_layout_transform(X: XLayer, target_layout: str) -> None: """ Transform layout of provided Upsampling2D XLayer to target layout """ - layout = X.attrs['data_layout'] + layout = X.attrs["data_layout"] axes_transpose = [layout.index(e) for e in target_layout] - X.attrs['data_layout'] = target_layout + X.attrs["data_layout"] = target_layout X.shapes[:] = TensorShape([X.shapes[i] for i in axes_transpose]) diff --git a/python/pyxir/target_registry.py b/python/pyxir/target_registry.py index c22c85f..7ad73b1 100644 --- a/python/pyxir/target_registry.py +++ b/python/pyxir/target_registry.py @@ -60,7 +60,10 @@ def check_target(self, target: str): """ Check whether the target exists """ if not self.is_target(target): # Try importing it on the fly - importlib.import_module("pyxir.contrib.target." + target.split("-")[0]) + try: + importlib.import_module("pyxir.contrib.target." + target.split("-")[0]) + except ModuleNotFoundError: + pass if not self.is_target(target): raise ValueError("Unknown target: {}, registered targets" " are: {}" diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l0_other.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l0_other.py index 58b54d7..1ff2023 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l0_other.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l0_other.py @@ -34,16 +34,13 @@ from pyxir.shapes import TupleShape, TensorShape -class TestRelayL0Other(unittest.TestCase): +class TestRelayL0Other(unittest.TestCase): @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_var_constant(self): - var = relay.var( - "var", - relay.TensorType((-1, 4, 2, 2), "int64") - ) + var = relay.var("var", relay.TensorType((-1, 4, 2, 2), "int64")) - const = relay.expr.const(np.array([1, -1], dtype=np.int64), 'int64') + const = relay.expr.const(np.array([1, -1], dtype=np.int64), "int64") net = relay.add(var, const) @@ -56,14 +53,14 @@ def test_var_constant(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert isinstance(layers[0].attrs['dtype'], str) - assert layers[0].attrs['dtype'] == 'int64' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert isinstance(layers[0].attrs["dtype"], str) + assert layers[0].attrs["dtype"] == "int64" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'BiasAdd' + assert layers[1].type[0] == "BiasAdd" assert layers[1].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_tuple(self): @@ -79,17 +76,17 @@ def test_tuple(self): xg = xf_relay.from_relay(mod, {}) layers = 
xg.get_layers() - assert layers[0].type[0] == 'Input' - assert isinstance(layers[0].attrs['dtype'], str) - assert layers[0].attrs['dtype'] == 'int64' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert isinstance(layers[0].attrs["dtype"], str) + assert layers[0].attrs["dtype"] == "int64" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Input' - assert isinstance(layers[0].attrs['dtype'], str) - assert layers[0].attrs['dtype'] == 'int64' - assert 'relay_id' in layers[0].attrs + assert layers[1].type[0] == "Input" + assert isinstance(layers[0].attrs["dtype"], str) + assert layers[0].attrs["dtype"] == "int64" + assert "relay_id" in layers[0].attrs - assert layers[2].type[0] == 'Tuple' + assert layers[2].type[0] == "Tuple" assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]]) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -109,29 +106,26 @@ def test_tuple_get_item(self): assert len(layers) == 4 - assert layers[0].type[0] == 'Input' - assert isinstance(layers[0].attrs['dtype'], str) - assert layers[0].attrs['dtype'] == 'int64' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert isinstance(layers[0].attrs["dtype"], str) + assert layers[0].attrs["dtype"] == "int64" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Input' - assert isinstance(layers[0].attrs['dtype'], str) - assert layers[0].attrs['dtype'] == 'int64' - assert 'relay_id' in layers[0].attrs + assert layers[1].type[0] == "Input" + assert isinstance(layers[0].attrs["dtype"], str) + assert layers[0].attrs["dtype"] == "int64" + assert "relay_id" in layers[0].attrs - assert layers[2].type[0] == 'Tuple' + assert layers[2].type[0] == "Tuple" assert layers[2].shapes == TupleShape([[-1, 4, 2, 2], [-1, 3, 2, 2]]) - assert layers[3].type[0] == 'TupleGetItem' - assert layers[3].attrs['index'] == 0 + assert layers[3].type[0] == "TupleGetItem" + assert layers[3].attrs["index"] == 0 assert layers[3].shapes == TensorShape([-1, 4, 2, 2]) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_relay_op(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.std(data, axis=1, keepdims=False, exclude=False) @@ -144,23 +138,23 @@ def test_relay_op(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" - assert layers[1].type[0] == 'Mean' + assert layers[1].type[0] == "Mean" assert layers[1].shapes == [-1, 1, 2, 2] # assert isinstance(layers[1].attrs['relay_id'], list) - assert layers[1].attrs['axes'] == [1] - assert layers[1].attrs['keepdims'] is True + assert layers[1].attrs["axes"] == [1] + assert layers[1].attrs["keepdims"] is True - assert layers[2].type[0] == 'RelayOp' + assert layers[2].type[0] == "RelayOp" assert layers[2].shapes == [-1, 2, 2] # assert isinstance(layers[2].attrs['relay_id'], list) - assert layers[2].attrs['relay_shape'] == [-1, 2, 2] - assert layers[2].attrs['dtype'] == 'float32' - assert layers[2].attrs['axis'] == '[1]' - assert layers[2].attrs['keepdims'] == '0' - assert layers[2].attrs['exclude'] == '0' + assert layers[2].attrs["relay_shape"] == [-1, 2, 2] + assert layers[2].attrs["dtype"] == "float32" + assert layers[2].attrs["axis"] == "[1]" + assert layers[2].attrs["keepdims"] == "0" + assert layers[2].attrs["exclude"] == "0" - assert layers[3].type[0] == 'Sqrt' + assert layers[3].type[0] == 
"Sqrt" assert layers[3].shapes == [-1, 2, 2] # assert isinstance(layers[3].attrs['relay_id'], list) diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l10_temporary.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l10_temporary.py index ec42e7f..ac9d3f9 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l10_temporary.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l10_temporary.py @@ -35,17 +35,12 @@ class TestRelayL10TemporaryOperationConversions(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_adaptive_avg_pool2d_1(self): warnings.filterwarnings("ignore") - data = relay.var( - "data", - relay.TensorType((-1, 4, 5, 5), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 5, 5), "float32")) - net = relay.nn.adaptive_avg_pool2d( - data, output_size=(3, 3), layout='NCHW') + net = relay.nn.adaptive_avg_pool2d(data, output_size=(3, 3), layout="NCHW") net = relay.Function(relay.analysis.free_vars(net), net) @@ -55,27 +50,23 @@ def test_nn_adaptive_avg_pool2d_1(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Pooling' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Pooling" assert layers[1].shapes.tolist() == [-1, 4, 3, 3] - assert layers[1].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert layers[1].attrs['insize'] == [5, 5] - assert layers[1].attrs['outsize'] == [3, 3] - assert layers[1].attrs['data_layout'] == 'NCHW' - assert layers[1].attrs['strides'] == [1, 1] - assert layers[1].attrs['kernel_size'] == [3, 3] - assert layers[1].attrs['pool_type'] == 'Avg' + assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert layers[1].attrs["insize"] == [5, 5] + assert layers[1].attrs["outsize"] == [3, 3] + assert layers[1].attrs["data_layout"] == "NCHW" + assert layers[1].attrs["strides"] == [1, 1] + assert layers[1].attrs["kernel_size"] == [3, 3] + assert layers[1].attrs["pool_type"] == "Avg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_adaptive_avg_pool2d_2(self): warnings.filterwarnings("ignore") - data = relay.var( - "data", - relay.TensorType((-1, 4, 6, 6), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 6, 6), "float32")) - net = relay.nn.adaptive_avg_pool2d( - data, output_size=(3, 3), layout='NCHW') + net = relay.nn.adaptive_avg_pool2d(data, output_size=(3, 3), layout="NCHW") net = relay.Function(relay.analysis.free_vars(net), net) @@ -85,27 +76,23 @@ def test_nn_adaptive_avg_pool2d_2(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Pooling' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Pooling" assert layers[1].shapes.tolist() == [-1, 4, 3, 3] - assert layers[1].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert layers[1].attrs['insize'] == [6, 6] - assert layers[1].attrs['outsize'] == [3, 3] - assert layers[1].attrs['data_layout'] == 'NCHW' - assert layers[1].attrs['strides'] == [2, 2] - assert layers[1].attrs['kernel_size'] == [2, 2] - assert layers[1].attrs['pool_type'] == 'Avg' + assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert layers[1].attrs["insize"] == [6, 6] + assert layers[1].attrs["outsize"] == [3, 3] + assert layers[1].attrs["data_layout"] == "NCHW" + assert layers[1].attrs["strides"] == [2, 2] + assert layers[1].attrs["kernel_size"] == [2, 2] + assert layers[1].attrs["pool_type"] == "Avg" 
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_adaptive_avg_pool2d_3(self): warnings.filterwarnings("ignore") - data = relay.var( - "data", - relay.TensorType((-1, 6, 6, 4), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 6, 6, 4), "float32")) - net = relay.nn.adaptive_avg_pool2d( - data, output_size=(6, 6), layout='NHWC') + net = relay.nn.adaptive_avg_pool2d(data, output_size=(6, 6), layout="NHWC") net = relay.Function(relay.analysis.free_vars(net), net) @@ -115,30 +102,26 @@ def test_nn_adaptive_avg_pool2d_3(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" assert layers[0].shapes.tolist() == [-1, 6, 6, 4] - assert layers[1].type[0] == 'Transpose' + assert layers[1].type[0] == "Transpose" assert layers[1].shapes.tolist() == [-1, 4, 6, 6] - assert layers[2].type[0] == 'Pooling' + assert layers[2].type[0] == "Pooling" assert layers[2].shapes.tolist() == [-1, 4, 6, 6] - assert layers[2].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert layers[2].attrs['insize'] == [6, 6] - assert layers[2].attrs['outsize'] == [6, 6] - assert layers[2].attrs['data_layout'] == 'NCHW' - assert layers[2].attrs['strides'] == [1, 1] - assert layers[2].attrs['kernel_size'] == [1, 1] - assert layers[2].attrs['pool_type'] == 'Avg' + assert layers[2].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert layers[2].attrs["insize"] == [6, 6] + assert layers[2].attrs["outsize"] == [6, 6] + assert layers[2].attrs["data_layout"] == "NCHW" + assert layers[2].attrs["strides"] == [1, 1] + assert layers[2].attrs["kernel_size"] == [1, 1] + assert layers[2].attrs["pool_type"] == "Avg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_adaptive_avg_pool2d_4(self): warnings.filterwarnings("ignore") - data = relay.var( - "data", - relay.TensorType((-1, 5, 5, 4), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 5, 5, 4), "float32")) - net = relay.nn.adaptive_avg_pool2d( - data, output_size=(1, 1), layout='NHWC') + net = relay.nn.adaptive_avg_pool2d(data, output_size=(1, 1), layout="NHWC") net = relay.Function(relay.analysis.free_vars(net), net) @@ -148,17 +131,17 @@ def test_nn_adaptive_avg_pool2d_4(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Transpose' - assert layers[2].type[0] == 'Pooling' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Transpose" + assert layers[2].type[0] == "Pooling" assert layers[2].shapes.tolist() == [-1, 4, 1, 1] - assert layers[2].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert layers[2].attrs['insize'] == [5, 5] - assert layers[2].attrs['outsize'] == [1, 1] - assert layers[2].attrs['data_layout'] == 'NCHW' - assert layers[2].attrs['strides'] == [5, 5] - assert layers[2].attrs['kernel_size'] == [5, 5] - assert layers[2].attrs['pool_type'] == 'Avg' + assert layers[2].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert layers[2].attrs["insize"] == [5, 5] + assert layers[2].attrs["outsize"] == [1, 1] + assert layers[2].attrs["data_layout"] == "NCHW" + assert layers[2].attrs["strides"] == [5, 5] + assert layers[2].attrs["kernel_size"] == [5, 5] + assert layers[2].attrs["pool_type"] == "Avg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_slice_like(self): @@ -172,9 +155,9 @@ def test_slice_like(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - 
assert layers[1].type[0] == 'Constant' - assert layers[2].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "Constant" + assert layers[2].type[0] == "AnyOp" assert layers[2].shapes == [1, 4, 3, 3] data = relay.expr.const(np.ones((1, 6, 4, 4), np.float32)) @@ -187,7 +170,7 @@ def test_slice_like(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'Constant' - assert layers[2].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "Constant" + assert layers[2].type[0] == "AnyOp" assert layers[2].shapes == [1, 6, 3, 3] diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l1_basic.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l1_basic.py index 1577dee..5272a33 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l1_basic.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l1_basic.py @@ -36,13 +36,9 @@ class TestRelayL1BasicConversions(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_add_biasadd(self): - left = relay.var( - "left", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32")) right = relay.expr.const(np.array([1.0, -1.0], dtype=np.float32)) @@ -57,21 +53,18 @@ def test_add_biasadd(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'BiasAdd' + assert layers[1].type[0] == "BiasAdd" assert layers[1].shapes == [-1, 4, 2, 2] - assert layers[1].bottoms == ['left'] - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['axis'] == 3 + assert layers[1].bottoms == ["left"] + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["axis"] == 3 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_add(self): - left = relay.var( - "left", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32")) right = relay.expr.const(np.zeros((2, 2), dtype=np.float32)) net = relay.add(left, right) @@ -82,24 +75,24 @@ def test_add(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Constant' - assert layers[1].tops[0][:3] == 'add' - assert 'relay_id' in layers[1].attrs + assert layers[1].type[0] == "Constant" + assert layers[1].tops[0][:3] == "add" + assert "relay_id" in layers[1].attrs - assert layers[2].type[0] == 'Add' + assert layers[2].type[0] == "Add" assert layers[2].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[2].attrs + assert "relay_id" in layers[2].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_batch_norm(self): var = relay.var("var", relay.TensorType((-1, 4, 2, 2), "float32")) data_mean = relay.expr.const(np.zeros((4,), dtype=np.float32)) data_var = relay.expr.const(np.ones((4,), dtype=np.float32)) - gamma = relay.expr.const(2. * np.ones((4,), dtype=np.float32)) - beta = relay.expr.const(3. 
* np.ones((4,), dtype=np.float32)) + gamma = relay.expr.const(2.0 * np.ones((4,), dtype=np.float32)) + beta = relay.expr.const(3.0 * np.ones((4,), dtype=np.float32)) bn = relay.nn.batch_norm(var, gamma, beta, data_mean, data_var)[0] # tgi = relay.TupleGetItem(bn, 0) @@ -112,18 +105,22 @@ def test_batch_norm(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs bnl = layers[1] - assert bnl.type[0] == 'BatchNorm' + assert bnl.type[0] == "BatchNorm" assert bnl.shapes == [-1, 4, 2, 2] np.testing.assert_array_equal(bnl.data[0], np.zeros((4,), dtype=np.float32)) np.testing.assert_array_equal(bnl.data[1], np.ones((4,), dtype=np.float32)) - np.testing.assert_array_equal(bnl.data[2], 2. * np.ones((4,), dtype=np.float32)) - np.testing.assert_array_equal(bnl.data[3], 3. * np.ones((4,), dtype=np.float32)) - assert 'relay_id' in bnl.attrs - assert bnl.attrs['axis'] == 1 + np.testing.assert_array_equal( + bnl.data[2], 2.0 * np.ones((4,), dtype=np.float32) + ) + np.testing.assert_array_equal( + bnl.data[3], 3.0 * np.ones((4,), dtype=np.float32) + ) + assert "relay_id" in bnl.attrs + assert bnl.attrs["axis"] == 1 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_biasadd(self): @@ -140,14 +137,14 @@ def test_biasadd(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'BiasAdd' + assert layers[1].type[0] == "BiasAdd" assert layers[1].shapes == [-1, 4, 2, 2] - assert layers[1].bottoms == ['data'] - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['axis'] == 1 + assert layers[1].bottoms == ["data"] + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["axis"] == 1 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_concatenate(self): @@ -164,19 +161,19 @@ def test_concatenate(self): assert len(layers) == 3 - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" assert layers[0].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[0].attrs + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Input' + assert layers[1].type[0] == "Input" assert layers[1].shapes == [-1, 8, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs - assert layers[2].type[0] == 'Concat' + assert layers[2].type[0] == "Concat" assert layers[2].shapes == [-1, 12, 2, 2] - assert layers[2].bottoms == ['data1', 'data2'] - assert 'relay_id' in layers[2].attrs - assert layers[2].attrs['axis'] == 1 + assert layers[2].bottoms == ["data1", "data2"] + assert "relay_id" in layers[2].attrs + assert layers[2].attrs["axis"] == 1 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_dense(self): @@ -193,14 +190,14 @@ def test_dense(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs assert layers[0].shapes == [-1, 4] - assert layers[1].type[0] == 'Dense' + assert layers[1].type[0] == "Dense" assert layers[1].shapes == [-1, 10] - assert layers[1].bottoms == ['data'] - assert 'relay_id' in layers[1].attrs + assert layers[1].bottoms == ["data"] + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_dropout(self): 
@@ -216,22 +213,19 @@ def test_nn_dropout(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs assert layers[0].shapes == [-1, 4, 2, 2] - assert layers[1].type[0] == 'Dropout' + assert layers[1].type[0] == "Dropout" assert layers[1].shapes == [-1, 4, 2, 2] - assert layers[1].bottoms == ['data'] - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['rate'] == .5 + assert layers[1].bottoms == ["data"] + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["rate"] == 0.5 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_exp(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.exp(data) @@ -243,16 +237,13 @@ def test_exp(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Exp' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Exp" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_expand_dims(self): - data = relay.var( - "data", - relay.TensorType((-1, 4), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4), "float32")) net = relay.expand_dims(data, axis=1, num_newaxis=2) @@ -264,19 +255,16 @@ def test_expand_dims(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'ExpandDims' - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['axis'] == 1 - assert layers[1].attrs['num_newaxis'] == 2 + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "ExpandDims" + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["axis"] == 1 + assert layers[1].attrs["num_newaxis"] == 2 assert layers[1].shapes == [-1, 1, 1, 4] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_log(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.log(data) @@ -288,21 +276,15 @@ def test_log(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Log' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Log" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_multiply(self): - left = relay.var( - "left", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32")) - right = relay.var( - "right", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + right = relay.var("right", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.multiply(left, right) @@ -315,22 +297,19 @@ def test_multiply(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Input' - assert 'relay_id' in layers[1].attrs + assert layers[1].type[0] == "Input" + assert "relay_id" in layers[1].attrs - assert layers[2].type[0] == 'Multiply' + assert layers[2].type[0] == "Multiply" assert layers[2].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in 
layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_multiply_right_constant(self): - left = relay.var( - "left", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32")) right = relay.expr.const(np.zeros((2, 2), dtype=np.float32)) @@ -345,19 +324,16 @@ def test_multiply_right_constant(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Scale' + assert layers[1].type[0] == "Scale" assert layers[1].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_multiply_left_constant(self): - right = relay.var( - "right", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + right = relay.var("right", relay.TensorType((-1, 4, 2, 2), "float32")) left = relay.expr.const(np.zeros((2, 2), dtype=np.float32)) net = relay.multiply(left, right) @@ -368,12 +344,12 @@ def test_multiply_left_constant(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs - assert layers[1].type[0] == 'Scale' + assert layers[1].type[0] == "Scale" assert layers[1].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_relu(self): @@ -382,38 +358,32 @@ def test_relu(self): net = relay.nn.relu(data) net = relay.Function(relay.analysis.free_vars(net), net) mod, params = testing.create_workload(net) - + xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'ReLU' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "ReLU" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_rsqrt(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.rsqrt(data) net = relay.Function(relay.analysis.free_vars(net), net) mod, params = testing.create_workload(net) - + xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'rSqrt' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "rSqrt" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_sigmoid(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.sigmoid(data) net = relay.Function(relay.analysis.free_vars(net), net) @@ -422,16 +392,13 @@ def test_sigmoid(self): xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Sigmoid' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Sigmoid" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM 
and/or TVM frontend") def test_softmax(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.nn.softmax(data) net = relay.Function(relay.analysis.free_vars(net), net) @@ -440,18 +407,15 @@ def test_softmax(self): xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Softmax' - assert 'relay_id' in layers[1].attrs - assert 'axis' in layers[1].attrs - assert layers[1].attrs['axis'] == -1 + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Softmax" + assert "relay_id" in layers[1].attrs + assert "axis" in layers[1].attrs + assert layers[1].attrs["axis"] == -1 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_sqrt(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.sqrt(data) net = relay.Function(relay.analysis.free_vars(net), net) @@ -460,9 +424,9 @@ def test_sqrt(self): xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Sqrt' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Sqrt" + assert "relay_id" in layers[1].attrs @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_subtract(self): @@ -479,9 +443,9 @@ def test_subtract(self): assert len(layers) == 3 - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Constant' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Constant" - assert layers[2].type[0] == 'Sub' + assert layers[2].type[0] == "Sub" assert layers[2].shapes == [-1, 4, 2, 2] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l2_convolution.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l2_convolution.py index ed3aeea..d3fcb79 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l2_convolution.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l2_convolution.py @@ -34,12 +34,17 @@ class TestRelayL2Convolutions(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_avg_pool2d(self): var = relay.var("var", relay.TensorType((-1, 2, 5, 5), "float32")) - avg_pool = relay.nn.avg_pool2d(var, pool_size=(3, 3), strides=(2, 2), padding=(1, 1), - ceil_mode=True, count_include_pad=True) + avg_pool = relay.nn.avg_pool2d( + var, + pool_size=(3, 3), + strides=(2, 2), + padding=(1, 1), + ceil_mode=True, + count_include_pad=True, + ) func = relay.Function([var], avg_pool) mod = tvm.IRModule.from_expr(func) @@ -50,20 +55,20 @@ def test_avg_pool2d(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Pooling' + assert X.type[0] == "Pooling" assert X.shapes == [-1, 2, 3, 3] - assert 'relay_id' in X.attrs - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - assert X.attrs['insize'] == [5, 5] - assert X.attrs['outsize'] == [3, 3] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [2, 2] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['pool_type'] == 'Avg' + assert "relay_id" in X.attrs + assert 
X.attrs["padding"] == [[0, 0], [0, 0], [1, 1], [1, 1]] + assert X.attrs["insize"] == [5, 5] + assert X.attrs["outsize"] == [3, 3] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [2, 2] + assert X.attrs["kernel_size"] == [3, 3] + assert X.attrs["pool_type"] == "Avg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_batch_flatten(self): @@ -77,49 +82,135 @@ def test_batch_flatten(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - - assert layers[1].type[0] == 'Flatten' + assert layers[0].type[0] == "Input" + + assert layers[1].type[0] == "Flatten" assert layers[1].shapes == [-1, 4] - assert 'relay_id' in layers[1].attrs + assert "relay_id" in layers[1].attrs + + def conv2d_test_util( + self, + in_shape, + weight_shape, + out_shape, + padding=(0, 0), + strides=(1, 1), + dilation=(1, 1), + groups=1, + data_layout="NCHW", + kernel_layout="OIHW", + ): + data = relay.var("data", relay.TensorType(in_shape, "float32")) + weight = relay.expr.const(np.ones(weight_shape, dtype=np.float32)) + c = relay.nn.conv2d( + data=data, + weight=weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout=data_layout, + kernel_layout=kernel_layout, + ) - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") - def test_conv2d(self): - data = relay.var("data", relay.TensorType((-1, 1, 4, 4), "float32")) - weight = relay.expr.const(np.ones((2, 1, 2, 2), dtype=np.float32)) - c = relay.nn.conv2d(data, weight, padding=(0, 0, 0, 0), kernel_layout='OIHW') - func = relay.Function([data], c) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) xg = xf_relay.from_relay(mod, {}) layers = xg.get_layers() - assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Convolution' - assert X.shapes == [-1, 2, 3, 3] - np.testing.assert_array_equal(X.data[0], np.ones((2, 1, 2, 2), dtype=np.float32)) - assert 'relay_id' in X.attrs - assert X.attrs['kernel_size'] == [2, 2] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['channels'] == [1, 2] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['groups'] == 1 + assert X.type[0] == "Convolution" + assert X.shapes == list( + out_shape + ), "Expected out shape: {0}, but got: {1}".format(out_shape, X.shapes) + # np.testing.assert_array_equal( + # X.data[0], np.ones(weight_shape, dtype=np.float32) + # ) + assert "relay_id" in X.attrs + assert X.attrs["kernel_size"] == [ + weight_shape[data_layout.index("H")], + weight_shape[data_layout.index("W")], + ] + assert X.attrs["strides"] == list(strides) + expected_padding = [ + [0, 0], + [0, 0], + [padding[0], padding[1]], + [padding[2], padding[3]], + ] + assert ( + X.attrs["padding"] == expected_padding + ), "Expected padding: {0}, but got: {1}".format( + expected_padding, X.attrs["padding"] + ) + assert X.attrs["channels"] == [ + in_shape[data_layout.index("C")], + weight_shape[kernel_layout.index("O")], + ] + assert X.attrs["data_layout"] == data_layout + assert X.attrs["kernel_layout"] == "OIHW" + assert X.attrs["groups"] == groups + + @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") + def test_conv2d(self): + self.conv2d_test_util( + in_shape=(-1, 1, 4, 4), + 
weight_shape=(2, 1, 2, 2), + out_shape=(-1, 2, 3, 3), + padding=(0, 0, 0, 0), + data_layout="NCHW", + kernel_layout="OIHW", + ) + self.conv2d_test_util( + in_shape=(-1, 1, 4, 4), + weight_shape=(1, 2, 2, 2), + out_shape=(-1, 2, 3, 3), + padding=(0, 0, 0, 0), + data_layout="NCHW", + kernel_layout="IOHW", + ) + self.conv2d_test_util( + in_shape=(1, 256, 28, 28), + weight_shape=(256, 256, 3, 3), + out_shape=(-1, 256, 28, 28), + padding=(2, 2, 2, 2), + dilation=(2, 2), + data_layout="NCHW", + kernel_layout="OIHW", + ) + self.conv2d_test_util( + in_shape=(1, 256, 28, 28), + weight_shape=(256, 256, 3, 3), + out_shape=(-1, 256, 28, 28), + padding=(36, 36, 36, 36), + dilation=(36, 36), + data_layout="NCHW", + kernel_layout="OIHW", + ) + self.conv2d_test_util( + in_shape=(1, 1, 4, 4), + weight_shape=(2, 1, 2, 2), + out_shape=(-1, 2, 2, 2), + padding=(0, 0, 0, 0), + dilation=(2, 2), + data_layout="NCHW", + kernel_layout="OIHW", + ) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_conv2d_transpose(self): data = relay.var("data", relay.TensorType((-1, 2, 3, 3), "float32")) weight = relay.expr.const(np.ones((2, 4, 3, 3), dtype=np.float32)) - c = relay.nn.conv2d_transpose(data, weight, padding=(0, 0, 0, 0), kernel_layout='OIHW') - + c = relay.nn.conv2d_transpose( + data, weight, padding=(0, 0, 0, 0), kernel_layout="OIHW" + ) + func = relay.Function([data], c) mod = tvm.IRModule.from_expr(func) mod = relay.transform.InferType()(mod) @@ -129,21 +220,23 @@ def test_conv2d_transpose(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Conv2DTranspose' + assert X.type[0] == "Conv2DTranspose" assert X.shapes == [-1, 4, 5, 5] - np.testing.assert_array_equal(X.data[0], np.ones((4, 2, 3, 3), dtype=np.float32)) - assert 'relay_id' in X.attrs - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['channels'] == [2, 4] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['groups'] == 1 + np.testing.assert_array_equal( + X.data[0], np.ones((4, 2, 3, 3), dtype=np.float32) + ) + assert "relay_id" in X.attrs + assert X.attrs["kernel_size"] == [3, 3] + assert X.attrs["strides"] == [1, 1] + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["channels"] == [2, 4] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["kernel_layout"] == "OIHW" + assert X.attrs["groups"] == 1 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_global_avg_pool2d(self): @@ -159,21 +252,21 @@ def test_global_avg_pool2d(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Pooling' - assert X.bottoms == ['var'] + assert X.type[0] == "Pooling" + assert X.bottoms == ["var"] assert X.shapes == [-1, 2, 1, 1] - assert 'relay_id' in X.attrs - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['insize'] == [5, 5] - assert X.attrs['outsize'] == [1, 1] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [1, 1] - assert X.attrs['kernel_size'] == [5, 5] - assert X.attrs['pool_type'] == 'Avg' + assert "relay_id" in X.attrs + assert 
X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["insize"] == [5, 5] + assert X.attrs["outsize"] == [1, 1] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [1, 1] + assert X.attrs["kernel_size"] == [5, 5] + assert X.attrs["pool_type"] == "Avg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_global_max_pool2d(self): @@ -189,26 +282,28 @@ def test_global_max_pool2d(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Pooling' - assert X.bottoms == ['var'] + assert X.type[0] == "Pooling" + assert X.bottoms == ["var"] assert X.shapes == [-1, 2, 1, 1] - assert 'relay_id' in X.attrs - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['insize'] == [5, 5] - assert X.attrs['outsize'] == [1, 1] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [1, 1] - assert X.attrs['kernel_size'] == [5, 5] - assert X.attrs['pool_type'] == 'Max' + assert "relay_id" in X.attrs + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["insize"] == [5, 5] + assert X.attrs["outsize"] == [1, 1] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [1, 1] + assert X.attrs["kernel_size"] == [5, 5] + assert X.attrs["pool_type"] == "Max" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_max_pool2d(self): var = relay.var("var", relay.TensorType((-1, 2, 4, 4), "float32")) - avg_pool = relay.nn.max_pool2d(var, pool_size=(2, 2), strides=(2, 2), padding=(1, 1)) + avg_pool = relay.nn.max_pool2d( + var, pool_size=(2, 2), strides=(2, 2), padding=(1, 1) + ) func = relay.Function([var], avg_pool) mod = tvm.IRModule.from_expr(func) @@ -219,25 +314,25 @@ def test_max_pool2d(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Pooling' - assert X.bottoms == ['var'] + assert X.type[0] == "Pooling" + assert X.bottoms == ["var"] assert X.shapes == [-1, 2, 3, 3] - assert 'relay_id' in X.attrs - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - assert X.attrs['insize'] == [4, 4] - assert X.attrs['outsize'] == [3, 3] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [2, 2] - assert X.attrs['kernel_size'] == [2, 2] - assert X.attrs['pool_type'] == 'Max' + assert "relay_id" in X.attrs + assert X.attrs["padding"] == [[0, 0], [0, 0], [1, 1], [1, 1]] + assert X.attrs["insize"] == [4, 4] + assert X.attrs["outsize"] == [3, 3] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [2, 2] + assert X.attrs["kernel_size"] == [2, 2] + assert X.attrs["pool_type"] == "Max" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_padding(self): - var = relay.var("var", relay.TensorType((-1, 2, 4, 4), "float32")) + var = relay.var("var", relay.TensorType((1, 2, 4, 4), "float32")) pad = relay.nn.pad(var, ((0, 0), (0, 0), (0, 1), (0, 1))) func = relay.Function([var], pad) @@ -249,22 +344,19 @@ def test_padding(self): assert len(layers) == 2 - assert layers[0].type[0] == 'Input' - assert 'relay_id' in layers[0].attrs + assert layers[0].type[0] == "Input" + assert "relay_id" in layers[0].attrs X = layers[1] - assert X.type[0] == 'Pad' - assert X.bottoms == ['var'] + 
assert X.type[0] == "Pad" + assert X.bottoms == ["var"] assert X.shapes == [-1, 2, 5, 5] - assert 'relay_id' in X.attrs - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 1], [0, 1]] + assert "relay_id" in X.attrs + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 1], [0, 1]] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_nn_upsampling(self): - data = relay.var( - "data", - relay.TensorType((-1, 4, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 4, 2, 2), "float32")) net = relay.nn.upsampling(data, scale_h=3, scale_w=2) @@ -279,12 +371,12 @@ def test_nn_upsampling(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Upsampling2D' - assert 'relay_id' in layers[1].attrs + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Upsampling2D" + assert "relay_id" in layers[1].attrs assert layers[1].shapes == [-1, 4, 6, 4] - assert layers[1].attrs['scale_h'] == 3 - assert layers[1].attrs['scale_w'] == 2 - assert layers[1].attrs['data_layout'] == 'NCHW' - assert layers[1].attrs['method'] == 'nearest_neighbor' - assert layers[1].attrs['align_corners'] is False + assert layers[1].attrs["scale_h"] == 3 + assert layers[1].attrs["scale_w"] == 2 + assert layers[1].attrs["data_layout"] == "NCHW" + assert layers[1].attrs["method"] == "nearest_neighbor" + assert layers[1].attrs["align_corners"] is False diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l3_math_and_transform.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l3_math_and_transform.py index 9e4e4e1..b04222a 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l3_math_and_transform.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l3_math_and_transform.py @@ -36,11 +36,10 @@ class TestRelayL3MathAndTransform(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_arange(self): - start = relay.expr.const(1.) - stop = relay.expr.const(5.) + start = relay.expr.const(1.0) + stop = relay.expr.const(5.0) interval = relay.expr.const(1.5) a = relay.arange(start, stop, interval) net = relay.Function([], a) @@ -52,23 +51,23 @@ def test_arange(self): assert len(layers) == 4 - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [1] - assert layers[1].type[0] == 'Constant' + assert layers[1].type[0] == "Constant" assert layers[1].shapes == [1] - assert layers[2].type[0] == 'Constant' + assert layers[2].type[0] == "Constant" assert layers[2].shapes == [1] - assert layers[3].type[0] == 'AnyOp' + assert layers[3].type[0] == "AnyOp" assert layers[3].shapes == [3] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_cast(self): data = relay.var("data", relay.TensorType((-1, 6, 4, 4), "float32")) - net = relay.cast(data, dtype='int8') + net = relay.cast(data, dtype="int8") net = relay.Function([data], net) mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) @@ -76,15 +75,15 @@ def test_cast(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Cast' - assert layers[1].attrs['dtype'] == 'int8' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Cast" + assert layers[1].attrs["dtype"] == "int8" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_clip(self): data = relay.var("data", relay.TensorType((-1, 6, 4, 4), "float32")) - net = relay.clip(data, 0., 7.) 
+ net = relay.clip(data, 0.0, 7.0) net = relay.Function([data], net) mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) @@ -92,10 +91,10 @@ def test_clip(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Clip' - assert layers[1].attrs['a_min'] == 0. - assert layers[1].attrs['a_max'] == 7. + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Clip" + assert layers[1].attrs["a_min"] == 0.0 + assert layers[1].attrs["a_max"] == 7.0 @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_ones_like(self): @@ -108,8 +107,8 @@ def test_ones_like(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "AnyOp" assert layers[1].shapes == [1, 6, 4, 4] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -123,9 +122,9 @@ def test_leaky_relu(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'LeakyReLU' - assert layers[1].attrs['alpha'] == .1 + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "LeakyReLU" + assert layers[1].attrs["alpha"] == 0.1 # @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") # def test_prelu(self): @@ -154,8 +153,8 @@ def test_repeat(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "AnyOp" assert layers[1].shapes == [8] c = relay.expr.const(np.ones((2, 2), dtype=np.float32)) @@ -167,11 +166,10 @@ def test_repeat(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "AnyOp" assert layers[1].shapes == [2, 4] - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_reshape(self): # 1 @@ -184,7 +182,7 @@ def test_reshape(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [4, 3, 2] # 2 @@ -197,7 +195,7 @@ def test_reshape(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [6, 1, 4] # 3 @@ -210,7 +208,7 @@ def test_reshape(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [2, 3, 4] # 4 @@ -223,7 +221,7 @@ def test_reshape(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [6, 20] # 5 @@ -236,18 +234,15 @@ def test_reshape(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" assert layers[0].shapes == [-1, 6, 1, 1] - assert layers[1].type[0] == 'Reshape' + assert layers[1].type[0] == "Reshape" assert layers[1].shapes == [-1, 6] @unittest.skipIf(skip, "Could not 
import TVM and/or TVM frontend") def test_split_int(self): - data = relay.var( - "data", - relay.TensorType((-1, 6, 4, 4), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 6, 4, 4), "float32")) net = relay.split(data, indices_or_sections=3, axis=1).astuple() @@ -260,24 +255,24 @@ def test_split_int(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Split' - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['axis'] == 1 - assert layers[1].attrs['indices'] == 3 - assert layers[1].shapes == TupleShape([TensorShape([-1, 2, 4, 4]), - TensorShape([-1, 2, 4, 4]), - TensorShape([-1, 2, 4, 4])]) + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Split" + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["axis"] == 1 + assert layers[1].attrs["indices"] == 3 + assert layers[1].shapes == TupleShape( + [ + TensorShape([-1, 2, 4, 4]), + TensorShape([-1, 2, 4, 4]), + TensorShape([-1, 2, 4, 4]), + ] + ) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_split_tuple(self): - data = relay.var( - "data", - relay.TensorType((-1, 5, 4, 4), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 5, 4, 4), "float32")) - net = relay.split(data, indices_or_sections=(1, 4), axis=1)\ - .astuple() + net = relay.split(data, indices_or_sections=(1, 4), axis=1).astuple() net = relay.Function([data], net) @@ -288,14 +283,18 @@ def test_split_tuple(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Split' - assert 'relay_id' in layers[1].attrs - assert layers[1].attrs['axis'] == 1 - assert layers[1].attrs['indices'] == (1, 4) - assert layers[1].shapes == TupleShape([TensorShape([-1, 1, 4, 4]), - TensorShape([-1, 3, 4, 4]), - TensorShape([-1, 1, 4, 4])]) + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Split" + assert "relay_id" in layers[1].attrs + assert layers[1].attrs["axis"] == 1 + assert layers[1].attrs["indices"] == (1, 4) + assert layers[1].shapes == TupleShape( + [ + TensorShape([-1, 1, 4, 4]), + TensorShape([-1, 3, 4, 4]), + TensorShape([-1, 1, 4, 4]), + ] + ) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_squeeze(self): @@ -308,21 +307,15 @@ def test_squeeze(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Squeeze' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Squeeze" assert layers[1].shapes == [-1, 6] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_take(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32")) - indices = relay.var( - "indices", - relay.TensorType([], "int32") - ) + indices = relay.var("indices", relay.TensorType([], "int32")) net = relay.take(data, indices, axis=1) @@ -331,26 +324,23 @@ def test_take(self): mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) - xgraph = xf_relay.from_relay(mod, {'indices': np.array(0, np.int32)}) + xgraph = xf_relay.from_relay(mod, {"indices": np.array(0, np.int32)}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Constant' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Constant" assert layers[1].data == np.array(0, np.int32) - assert layers[2].type[0] == 'Take' - assert 
'relay_id' in layers[2].attrs - assert layers[2].attrs['axis'] == 1 - assert layers[2].attrs['mode'] == 'clip' + assert layers[2].type[0] == "Take" + assert "relay_id" in layers[2].attrs + assert layers[2].attrs["axis"] == 1 + assert layers[2].attrs["mode"] == "clip" assert layers[2].shapes == [-1, 224, 224] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_transpose_constant(self): d = np.zeros((1, 3, 2, 2)) - data = relay.var( - "data", - relay.TensorType((1, 3, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((1, 3, 2, 2), "float32")) net = relay.transpose(data, axes=(0, 2, 3, 1)) @@ -359,20 +349,17 @@ def test_transpose_constant(self): mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) - xgraph = xf_relay.from_relay(mod, {'data': d}) + xgraph = xf_relay.from_relay(mod, {"data": d}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' + assert layers[0].type[0] == "Constant" assert layers[0].shapes == [1, 2, 2, 3] np.testing.assert_array_equal(layers[0].data[0], np.transpose(d, (0, 2, 3, 1))) @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_transpose(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 2, 2), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 3, 2, 2), "float32")) net = relay.transpose(data, axes=(0, 2, 3, 1)) @@ -385,9 +372,9 @@ def test_transpose(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" assert layers[0].shapes == [-1, 3, 2, 2] - assert layers[1].type[0] == 'Transpose' + assert layers[1].type[0] == "Transpose" assert layers[1].shapes == [-1, 2, 2, 3] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -401,12 +388,10 @@ def test_zeros_like(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "AnyOp" assert layers[1].shapes == [1, 6, 4, 4] - - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_arange_full_and_reshape(self): start = relay.expr.const(0.0) @@ -415,14 +400,14 @@ def test_arange_full_and_reshape(self): fill_val = relay.expr.const(1.0) fill_shape = [10, 1] - dtype = 'float32' + dtype = "float32" left = relay.arange(start, stop, step, dtype) left = relay.reshape(left, [-1, 1]) - left = relay.reshape(left, [1,-1]) - + left = relay.reshape(left, [1, -1]) + right = relay.full(fill_val, fill_shape, dtype) - right = relay.reshape(right, [1,-1]) + right = relay.reshape(right, [1, -1]) net = relay.multiply(left, right) @@ -431,30 +416,28 @@ def test_arange_full_and_reshape(self): xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert len(layers) == 10 - assert layers[0].type[0] == 'Constant' - assert layers[3].type[0] == 'AnyOp' - assert layers[7].type[0] == 'AnyOp' - assert layers[5].shapes == [1,10] - assert layers[8].shapes == [1,10] - - + assert len(layers) == 10 + assert layers[0].type[0] == "Constant" + assert layers[3].type[0] == "AnyOp" + assert layers[7].type[0] == "AnyOp" + assert layers[5].shapes == [1, 10] + assert layers[8].shapes == [1, 10] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_full(self): fill_val = relay.expr.const(1.0) fill_shape = [10, 1] - - net = relay.full(fill_val, fill_shape, 'float32') - net = relay.reshape(net,[1, -1]) + + net = relay.full(fill_val, fill_shape, 
"float32") + net = relay.reshape(net, [1, -1]) mod = tvm.IRModule.from_expr(net) - params={} + params = {} xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - - assert layers[0].type[0] == 'Constant' - assert layers[0].shapes == [1] - assert layers[1].type[0] == 'AnyOp' - assert layers[1].shapes == [10, 1] - assert layers[2].type[0] == 'Reshape' - assert layers[2].shapes == [1, 10] + + assert layers[0].type[0] == "Constant" + assert layers[0].shapes == [1] + assert layers[1].type[0] == "AnyOp" + assert layers[1].shapes == [10, 1] + assert layers[2].type[0] == "Reshape" + assert layers[2].shapes == [1, 10] diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l4_broadcast_and_reductions.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l4_broadcast_and_reductions.py index 60aa021..dfe5a66 100644 --- a/tests/unit/frontend/tvm/relay_tools/test_relay_l4_broadcast_and_reductions.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l4_broadcast_and_reductions.py @@ -35,7 +35,6 @@ class TestRelayL4BroadcastAndReductions(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_greater(self): left = relay.var("left", relay.TensorType((-1, 4, 2, 2), "float32")) @@ -48,9 +47,9 @@ def test_greater(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Input' - assert layers[2].type[0] == 'Greater' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Input" + assert layers[2].type[0] == "Greater" assert layers[2].shapes == [-1, 4, 2, 2] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -65,9 +64,9 @@ def test_greater_constant(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Constant' - assert layers[2].type[0] == 'Greater' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Constant" + assert layers[2].type[0] == "Greater" assert layers[2].shapes == [-1, 2, 2, 4] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -80,8 +79,8 @@ def test_mean(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Mean' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Mean" assert layers[1].shapes == [-1, 1, 1] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -95,8 +94,8 @@ def test_strided_slice(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'StridedSlice' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "StridedSlice" assert layers[1].shapes == [2, 3, 3] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -112,8 +111,8 @@ def test_where_constant(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Constant' - assert layers[1].type[0] == 'Input' - assert layers[2].type[0] == 'Input' - assert layers[3].type[0] == 'AnyOp' + assert layers[0].type[0] == "Constant" + assert layers[1].type[0] == "Input" + assert layers[2].type[0] == "Input" + assert layers[3].type[0] == "AnyOp" assert layers[3].shapes == [-1, 2, 2, 4] diff --git a/tests/unit/frontend/tvm/relay_tools/test_relay_l5_vision.py b/tests/unit/frontend/tvm/relay_tools/test_relay_l5_vision.py index dd7df5a..8335eac 100644 --- 
a/tests/unit/frontend/tvm/relay_tools/test_relay_l5_vision.py +++ b/tests/unit/frontend/tvm/relay_tools/test_relay_l5_vision.py @@ -35,7 +35,6 @@ class TestRelayL5VisionOperationConversions(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_image_resize_to_upsampling2d(self): data = relay.var("data", relay.TensorType((1, 20, 20, 32), "float32")) @@ -45,21 +44,21 @@ def test_image_resize_to_upsampling2d(self): size=[40, 40], layout="NHWC", method="nearest_neighbor", - coordinate_transformation_mode="asymmetric" + coordinate_transformation_mode="asymmetric", ) mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) - params={} + params = {} xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[1].type[0] == 'Transpose' - assert layers[1].shapes == [-1, 32, 20, 20] - assert layers[2].type[0] == 'Upsampling2D' - assert layers[2].shapes == [-1, 32, 40, 40] - assert layers[3].type[0] == 'Transpose' - assert layers[3].shapes == [-1, 40, 40, 32] + assert layers[1].type[0] == "Transpose" + assert layers[1].shapes == [-1, 32, 20, 20] + assert layers[2].type[0] == "Upsampling2D" + assert layers[2].shapes == [-1, 32, 40, 40] + assert layers[3].type[0] == "Transpose" + assert layers[3].shapes == [-1, 40, 40, 32] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_image_resize(self): @@ -68,19 +67,19 @@ def test_image_resize(self): net = relay.image.resize( data, size=[40, 40], - layout = "NHWC", + layout="NHWC", method="nearest_neighbor", - coordinate_transformation_mode="half_pixel" + coordinate_transformation_mode="half_pixel", ) mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) - params={} + params = {} xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[1].type[0] == 'AnyOp' - assert layers[1].shapes == [-1, 40, 40, 32] + assert layers[1].type[0] == "AnyOp" + assert layers[1].shapes == [-1, 40, 40, 32] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_yolo_reorg(self): @@ -93,14 +92,14 @@ def test_yolo_reorg(self): xgraph = xf_relay.from_relay(mod, params) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'YoloReorg' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "YoloReorg" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_valid_counts(self): data = relay.var("data", relay.TensorType((-1, 2, 6), "float32")) - net = relay.vision.get_valid_counts(data, score_threshold=1.)[0] + net = relay.vision.get_valid_counts(data, score_threshold=1.0)[0] net = relay.Function(relay.analysis.free_vars(net), net) mod = tvm.IRModule.from_expr(net) mod = relay.transform.InferType()(mod) @@ -108,9 +107,9 @@ def test_valid_counts(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Constant' - assert layers[2].type[0] == 'AnyOp' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Constant" + assert layers[2].type[0] == "AnyOp" assert layers[2].shapes == [[1], [-1, 2, 6], [-1, 2]] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") @@ -127,10 +126,10 @@ def test_nms(self): xgraph = xf_relay.from_relay(mod, {}) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Constant' - assert layers[2].type[0] == 'Input' - assert layers[3].type[0] == 'Constant' - assert 
layers[4].type[0] == 'Constant' - assert layers[5].type[0] == 'AnyOp' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Constant" + assert layers[2].type[0] == "Input" + assert layers[3].type[0] == "Constant" + assert layers[4].type[0] == "Constant" + assert layers[5].type[0] == "AnyOp" assert layers[5].shapes == [[-1, 2], [-1, -1]] diff --git a/tests/unit/frontend/tvm/test_relay.py b/tests/unit/frontend/tvm/test_relay.py index c89f727..8be3097 100644 --- a/tests/unit/frontend/tvm/test_relay.py +++ b/tests/unit/frontend/tvm/test_relay.py @@ -35,13 +35,9 @@ class TestRelayFrontend(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_simple_network(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") @@ -54,31 +50,24 @@ def test_simple_network(self): weight=weight, kernel_size=(3, 3), channels=16, - padding=(0, 0) + padding=(0, 0), ) simple_net = relay.nn.batch_norm( - simple_net, - bn_gamma, - bn_beta, - bn_mmean, - bn_mvar + simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar )[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3)) - simple_net = relay.op.transform.squeeze(simple_net) + simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 16)) dense_weight = relay.var("dense_weight") - dense_bias = relay.var('dense_bias') + dense_bias = relay.var("dense_bias") simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10) simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1) simple_net = relay.nn.softmax(simple_net, axis=1) - simple_net = relay.op.transform.reshape(simple_net, newshape=(-1, 10)) + simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 10)) - simple_net = relay.Function( - relay.analysis.free_vars(simple_net), - simple_net - ) + func = relay.Function(relay.analysis.free_vars(simple_net), simple_net) mod, params = testing.create_workload(simple_net) @@ -86,24 +75,21 @@ def test_simple_network(self): layers = xgraph.get_layers() - assert(layers[0].type[0] == 'Input') - assert(layers[1].type[0] == 'Pad') - assert(layers[2].type[0] == 'Convolution') - assert(layers[3].type[0] == 'BatchNorm') - assert(layers[4].type[0] == 'ReLU') - assert(layers[5].type[0] == 'Mean') - assert(layers[6].type[0] == 'Squeeze') - assert(layers[7].type[0] == 'Dense') - assert(layers[8].type[0] == 'BiasAdd') - assert(layers[9].type[0] == 'Softmax') - assert(layers[10].type[0] == 'Reshape') + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Pad" + assert layers[2].type[0] == "Convolution" + assert layers[3].type[0] == "BatchNorm" + assert layers[4].type[0] == "ReLU" + assert layers[5].type[0] == "Mean" + assert layers[6].type[0] == "Reshape" + assert layers[7].type[0] == "Dense" + assert layers[8].type[0] == "BiasAdd" + assert layers[9].type[0] == "Softmax" + assert layers[10].type[0] == "Reshape" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_simple_network_cvx(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") @@ -116,43 +102,35 @@ def test_simple_network_cvx(self): weight=weight, kernel_size=(3, 3), 
channels=16, - padding=(0, 0) + padding=(0, 0), ) simple_net = relay.nn.relu(simple_net) - simple_net = relay.Function( - relay.analysis.free_vars(simple_net), - simple_net - ) + simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) mod, params = testing.create_workload(simple_net) xgraph = xf_relay.from_relay( - mod, - params, - cvx_preprocessing={'data': 'scale-0.5__transpose-2,0,1'} - ) + mod, params, cvx_preprocessing={"data": "scale-0.5__transpose-2,0,1"} + ) layers = xgraph.get_layers() - assert(layers[0].type[0] == 'StrInput') + assert layers[0].type[0] == "StrInput" assert layers[0].shapes == [-1] - assert layers[1].type[0] == 'Cvx' + assert layers[1].type[0] == "Cvx" assert layers[1].shapes == [-1, 3, 224, 224] - assert(layers[2].type[0] == 'Pad') - assert(layers[3].type[0] == 'Convolution') - assert(layers[4].type[0] == 'ReLU') + assert layers[2].type[0] == "Pad" + assert layers[3].type[0] == "Convolution" + assert layers[4].type[0] == "ReLU" - assert(layers[0].tops == ['data_cvx']) - assert(layers[1].bottoms == ['data']) - assert(layers[1].tops[0][:7] == 'nn.pad-') + assert layers[0].tops == ["data_cvx"] + assert layers[1].bottoms == ["data"] + assert layers[1].tops[0][:7] == "nn.pad-" @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_conv2d_transpose(self): - data = relay.var( - "data", - relay.TensorType((-1, 1, 3, 3), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 1, 3, 3), "float32")) weight = relay.var("weight") simple_net = relay.nn.conv2d_transpose( @@ -162,14 +140,11 @@ def test_conv2d_transpose(self): channels=1, padding=(0, 0), strides=(2, 2), - data_layout='NCHW', - kernel_layout='IOHW' + data_layout="NCHW", + kernel_layout="IOHW", ) - simple_net = relay.Function( - relay.analysis.free_vars(simple_net), - simple_net - ) + simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) mod, params = testing.create_workload(simple_net) @@ -177,22 +152,19 @@ def test_conv2d_transpose(self): layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' + assert layers[0].type[0] == "Input" assert layers[0].shapes == [-1, 1, 3, 3] - assert layers[1].type[0] == 'Conv2DTranspose' + assert layers[1].type[0] == "Conv2DTranspose" assert layers[1].shapes == [-1, 1, 6, 6] assert layers[1].sizes == [36] - assert layers[1].attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert layers[1].attrs['strides'] == [2, 2] - assert layers[1].attrs['dilation'] == [1, 1] + assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert layers[1].attrs["strides"] == [2, 2] + assert layers[1].attrs["dilation"] == [1, 1] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_resnet_block(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") @@ -200,23 +172,16 @@ def test_resnet_block(self): bn_mvar = relay.var("bn_var") conv2d0_expr = relay.nn.conv2d( - data=data, - weight=weight, - kernel_size=(3, 3), - channels=16, - padding=(1, 1) + data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1) ) bn0_expr = relay.nn.batch_norm( - conv2d0_expr, - bn_gamma, - bn_beta, - bn_mmean, - bn_mvar + conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar )[0] relu0_expr = relay.nn.relu(bn0_expr) - max_pool0_expr = relay.nn.max_pool2d(relu0_expr, pool_size=(2, 2), - 
strides=(2, 2)) + max_pool0_expr = relay.nn.max_pool2d( + relu0_expr, pool_size=(2, 2), strides=(2, 2) + ) conv2d1_weight = relay.var("conv2d1_weight") conv2d1_bias = relay.var("conv2d1_bias") @@ -225,24 +190,21 @@ def test_resnet_block(self): weight=conv2d1_weight, kernel_size=(3, 3), channels=16, - padding=(1, 1) + padding=(1, 1), ) - bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, - axis=1) + bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1) relu1_expr = relay.nn.relu(bias_add0_expr) add0_expr = relay.op.tensor.add(max_pool0_expr, relu1_expr) - avg_pool0_expr = relay.nn.avg_pool2d(add0_expr, pool_size=(2, 2), - strides=(2, 2)) + avg_pool0_expr = relay.nn.avg_pool2d( + add0_expr, pool_size=(2, 2), strides=(2, 2) + ) global_avg_pool0_expr = relay.op.nn.global_avg_pool2d(avg_pool0_expr) bf_expr = relay.nn.batch_flatten(global_avg_pool0_expr) net = avg_pool0_expr - net = relay.Function( - relay.analysis.free_vars(net), - net - ) + net = relay.Function(relay.analysis.free_vars(net), net) mod, params = testing.create_workload(net) @@ -250,24 +212,21 @@ def test_resnet_block(self): layers = xgraph.get_layers() - assert(layers[0].type[0] == 'Input') - assert(layers[1].type[0] == 'Convolution') - assert(layers[2].type[0] == 'BatchNorm') - assert(layers[3].type[0] == 'ReLU') - assert(layers[4].type[0] == 'Pooling') - assert(layers[5].type[0] == 'Convolution') - assert(layers[6].type[0] == 'BiasAdd') - assert(layers[7].type[0] == 'ReLU') - assert(layers[8].type[0] == 'Eltwise') - assert(layers[9].type[0] == 'Pooling') - assert(layers[9].shapes == [-1, 16, 56, 56]) + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Convolution" + assert layers[2].type[0] == "BatchNorm" + assert layers[3].type[0] == "ReLU" + assert layers[4].type[0] == "Pooling" + assert layers[5].type[0] == "Convolution" + assert layers[6].type[0] == "BiasAdd" + assert layers[7].type[0] == "ReLU" + assert layers[8].type[0] == "Eltwise" + assert layers[9].type[0] == "Pooling" + assert layers[9].shapes == [-1, 16, 56, 56] @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_inception_block(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") @@ -275,23 +234,16 @@ def test_inception_block(self): bn_mvar = relay.var("bn_var") conv2d0_expr = relay.nn.conv2d( - data=data, - weight=weight, - kernel_size=(3, 3), - channels=16, - padding=(1, 1) + data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1) ) bn0_expr = relay.nn.batch_norm( - conv2d0_expr, - bn_gamma, - bn_beta, - bn_mmean, - bn_mvar + conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar )[0] relu0_expr = relay.nn.relu(bn0_expr) - max_pool0_expr = relay.nn.max_pool2d(relu0_expr, pool_size=(2, 2), - strides=(2, 2)) + max_pool0_expr = relay.nn.max_pool2d( + relu0_expr, pool_size=(2, 2), strides=(2, 2) + ) conv2d1_weight = relay.var("conv2d1_weight") conv2d1_bias = relay.var("conv2d1_bias") @@ -301,7 +253,7 @@ def test_inception_block(self): kernel_size=(3, 3), channels=16, padding=(1, 1), - strides=(2, 2) + strides=(2, 2), ) bias_add1_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1) relu1_expr = relay.nn.relu(bias_add1_expr) @@ -314,22 +266,18 @@ def test_inception_block(self): kernel_size=(3, 3), channels=16, padding=(1, 1), - strides=(2, 2) + strides=(2, 2), ) bias_add2_expr 
= relay.nn.bias_add(conv2d2_expr, conv2d2_bias, axis=1) relu2_expr = relay.nn.relu(bias_add2_expr) - concat0_expr = relay.op.tensor.concatenate([relu1_expr, relu2_expr], - axis=1) + concat0_expr = relay.op.tensor.concatenate([relu1_expr, relu2_expr], axis=1) global_max_pool0_expr = relay.op.nn.global_max_pool2d(concat0_expr) net = global_max_pool0_expr - net = relay.Function( - relay.analysis.free_vars(net), - net - ) + net = relay.Function(relay.analysis.free_vars(net), net) mod, params = testing.create_workload(net) @@ -337,17 +285,17 @@ def test_inception_block(self): layers = xgraph.get_layers() - assert(layers[0].type[0] == 'Input') - assert(layers[1].type[0] == 'Convolution') - assert(layers[2].type[0] == 'BatchNorm') - assert(layers[3].type[0] == 'ReLU') - assert(layers[4].type[0] == 'Pooling') - assert(layers[5].type[0] == 'Convolution') - assert(layers[6].type[0] == 'BiasAdd') - assert(layers[7].type[0] == 'ReLU') - assert(layers[8].type[0] == 'Convolution') - assert(layers[9].type[0] == 'BiasAdd') - assert(layers[10].type[0] == 'ReLU') - assert(layers[11].type[0] == 'Concat') - assert(layers[12].type[0] == 'Pooling') - assert(layers[12].shapes == [-1, 32, 1, 1]) + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Convolution" + assert layers[2].type[0] == "BatchNorm" + assert layers[3].type[0] == "ReLU" + assert layers[4].type[0] == "Pooling" + assert layers[5].type[0] == "Convolution" + assert layers[6].type[0] == "BiasAdd" + assert layers[7].type[0] == "ReLU" + assert layers[8].type[0] == "Convolution" + assert layers[9].type[0] == "BiasAdd" + assert layers[10].type[0] == "ReLU" + assert layers[11].type[0] == "Concat" + assert layers[12].type[0] == "Pooling" + assert layers[12].shapes == [-1, 32, 1, 1] diff --git a/tests/unit/frontend/tvm/test_relay_io.py b/tests/unit/frontend/tvm/test_relay_io.py index eb03e08..ad9de2a 100644 --- a/tests/unit/frontend/tvm/test_relay_io.py +++ b/tests/unit/frontend/tvm/test_relay_io.py @@ -26,7 +26,7 @@ import tvm from tvm import relay from tvm.relay import testing - + skip = False except Exception as e: skip = True @@ -39,13 +39,9 @@ class TestRelayFrontend(unittest.TestCase): - @unittest.skipIf(skip, "Could not import TVM and/or TVM frontend") def test_simple_network(self): - data = relay.var( - "data", - relay.TensorType((-1, 3, 224, 224), "float32") - ) + data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") @@ -58,63 +54,56 @@ def test_simple_network(self): weight=weight, kernel_size=(3, 3), channels=16, - padding=(0, 0) + padding=(0, 0), ) simple_net = relay.nn.batch_norm( - simple_net, - bn_gamma, - bn_beta, - bn_mmean, - bn_mvar + simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar )[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3)) - simple_net = relay.op.transform.squeeze(simple_net) + simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 16)) dense_weight = relay.var("dense_weight") - dense_bias = relay.var('dense_bias') + dense_bias = relay.var("dense_bias") simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10) simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1) simple_net = relay.nn.softmax(simple_net, axis=1) - simple_net = relay.op.transform.reshape(simple_net, newshape=(-1, 10)) + simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 10)) - simple_net = relay.Function( - 
relay.analysis.free_vars(simple_net), - simple_net - ) + simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) mod, params = testing.create_workload(simple_net) json_file = os.path.join(FILE_DIR, "relay_mod_test.json") - with open(json_file, 'w') as fo: + with open(json_file, "w") as fo: fo.write(tvm.ir.save_json(mod)) params_file = os.path.join(FILE_DIR, "relay_params_test.params") with open(params_file, "wb") as fo: fo.write(relay.save_param_dict(params)) - mod_read, params_read = load_model_from_file('Relay', 'Relay')( + mod_read, params_read = load_model_from_file("Relay", "Relay")( model_path=json_file, - shapes={'data': [-1, 3, 224, 224]}, - opt_model_path=params_file + shapes={"data": [-1, 3, 224, 224]}, + opt_model_path=params_file, ) xgraph = xf_relay.from_relay(mod_read, params_read) layers = xgraph.get_layers() - assert layers[0].type[0] == 'Input' - assert layers[1].type[0] == 'Pad' - assert layers[2].type[0] == 'Convolution' - assert layers[3].type[0] == 'BatchNorm' - assert layers[4].type[0] == 'ReLU' - assert layers[5].type[0] == 'Mean' - assert layers[6].type[0] == 'Squeeze' - assert layers[7].type[0] == 'Dense' - assert layers[8].type[0] == 'BiasAdd' - assert layers[9].type[0] == 'Softmax' - assert layers[10].type[0] == 'Reshape' + assert layers[0].type[0] == "Input" + assert layers[1].type[0] == "Pad" + assert layers[2].type[0] == "Convolution" + assert layers[3].type[0] == "BatchNorm" + assert layers[4].type[0] == "ReLU" + assert layers[5].type[0] == "Mean" + assert layers[6].type[0] == "Reshape" + assert layers[7].type[0] == "Dense" + assert layers[8].type[0] == "BiasAdd" + assert layers[9].type[0] == "Softmax" + assert layers[10].type[0] == "Reshape" os.remove(json_file) os.remove(params_file) diff --git a/tests/unit/graph/ops/test_l1_basic_nn.py b/tests/unit/graph/ops/test_l1_basic_nn.py index b2f43ab..f14435f 100644 --- a/tests/unit/graph/ops/test_l1_basic_nn.py +++ b/tests/unit/graph/ops/test_l1_basic_nn.py @@ -24,294 +24,287 @@ class TestL1BasicNN(unittest.TestCase): - def test_expand_dims_positive_axis(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[-1, 4], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - edX = xlf.get_xop_factory_func('ExpandDims')('ed1', [iX], - axis=0, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")("ed1", [iX], axis=0, num_newaxis=2) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == 0 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == 0 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [1, 1, -1, 4] - edX = xlf.get_xop_factory_func('ExpandDims')('ed2', [iX], - axis=1, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")("ed2", [iX], axis=1, num_newaxis=2) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == 1 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == 1 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [-1, 1, 1, 4] - edX = xlf.get_xop_factory_func('ExpandDims')('ed3', [iX], - axis=2, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")("ed3", [iX], axis=2, num_newaxis=2) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == 2 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == 2 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [-1, 4, 1, 1] with self.assertRaises(AssertionError): - 
edX = xlf.get_xop_factory_func('ExpandDims')('ed4', [iX], - axis=3, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")( + "ed4", [iX], axis=3, num_newaxis=2 + ) def test_expand_dims_negative_axis(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[-1, 4], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - edX = xlf.get_xop_factory_func('ExpandDims')('ed1', [iX], - axis=-1, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")( + "ed1", [iX], axis=-1, num_newaxis=2 + ) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == -1 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == -1 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [-1, 4, 1, 1] - edX = xlf.get_xop_factory_func('ExpandDims')('ed2', [iX], - axis=-2, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")( + "ed2", [iX], axis=-2, num_newaxis=2 + ) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == -2 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == -2 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [-1, 1, 1, 4] - edX = xlf.get_xop_factory_func('ExpandDims')('ed3', [iX], - axis=-3, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")( + "ed3", [iX], axis=-3, num_newaxis=2 + ) - assert edX.type[0] == 'ExpandDims' - assert edX.attrs['axis'] == -3 - assert edX.attrs['num_newaxis'] == 2 + assert edX.type[0] == "ExpandDims" + assert edX.attrs["axis"] == -3 + assert edX.attrs["num_newaxis"] == 2 assert edX.shapes == [1, 1, -1, 4] with self.assertRaises(AssertionError): - edX = xlf.get_xop_factory_func('ExpandDims')('ed4', [iX], - axis=-4, - num_newaxis=2) + edX = xlf.get_xop_factory_func("ExpandDims")( + "ed4", [iX], axis=-4, num_newaxis=2 + ) def test_multiply_layer(self): iX1 = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[-1, 2, 1, 4], sizes=[8], bottoms=[], tops=[], - targets=[] + targets=[], ) iX2 = XLayer( - type=['Input'], - name='in2', + type=["Input"], + name="in2", shapes=[-1, 2, 1, 4], sizes=[8], bottoms=[], tops=[], - targets=[] + targets=[], ) - mX = xlf.get_xop_factory_func('Multiply')('mul2', [iX1, iX2]) + mX = xlf.get_xop_factory_func("Multiply")("mul2", [iX1, iX2]) - assert mX.type[0] == 'Multiply' + assert mX.type[0] == "Multiply" assert mX.shapes == [-1, 2, 1, 4] iX3 = XLayer( - type=['Input'], - name='in3', + type=["Input"], + name="in3", shapes=[-1, 1, 4, 1], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - mX = xlf.get_xop_factory_func('Multiply')('mul3', [iX1, iX3]) + mX = xlf.get_xop_factory_func("Multiply")("mul3", [iX1, iX3]) - assert mX.type[0] == 'Multiply' + assert mX.type[0] == "Multiply" assert mX.shapes == [-1, 2, 4, 4] iX4 = XLayer( - type=['Input'], - name='in4', + type=["Input"], + name="in4", shapes=[4, 1], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - mX = xlf.get_xop_factory_func('Multiply')('mul4', [iX1, iX4]) + mX = xlf.get_xop_factory_func("Multiply")("mul4", [iX1, iX4]) - assert mX.type[0] == 'Multiply' + assert mX.type[0] == "Multiply" assert mX.shapes == [-1, 2, 4, 4] def test_relu_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - rX = px.ops.relu('relu1', [iX]) + rX = px.ops.relu("relu1", [iX]) - assert rX.type[0] == 'ReLU' + assert rX.type[0] == "ReLU" 
assert rX.shapes == [1, 2, 4, 4] from pyxir.graph.ops.l1_basic_nn import relu_transpose_transform relu_transpose_transform(rX, axes=[0, 2, 3, 1]) - assert rX.type[0] == 'ReLU' + assert rX.type[0] == "ReLU" assert rX.shapes == [1, 4, 4, 2] def test_scaling_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) gX = XLayer( - type=['Constant'], - name='gamma', + type=["Constant"], + name="gamma", shapes=[2], sizes=[2], - data=[np.array([1., 2.])], + data=[np.array([1.0, 2.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) bX = XLayer( - type=['Constant'], - name='beta', + type=["Constant"], + name="beta", shapes=[2], sizes=[2], - data=[np.array([1., -2.])], + data=[np.array([1.0, -2.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = xlf.get_xop_factory_func('Scale')('scale1', iX, gX, bX, axis=1) + sX = xlf.get_xop_factory_func("Scale")("scale1", iX, gX, bX, axis=1) - assert sX.type[0] == 'Scale' - assert sX.attrs['axis'] == 1 + assert sX.type[0] == "Scale" + assert sX.attrs["axis"] == 1 - np.testing.assert_array_equal(sX.data.gamma, np.array([1., 2.])) - np.testing.assert_array_equal(sX.data.beta, np.array([1., -2.])) + np.testing.assert_array_equal(sX.data.gamma, np.array([1.0, 2.0])) + np.testing.assert_array_equal(sX.data.beta, np.array([1.0, -2.0])) from pyxir.graph.ops.l1_basic_nn import scale_transpose_transform scale_transpose_transform(sX, axes=[0, 2, 3, 1]) - assert sX.type[0] == 'Scale' + assert sX.type[0] == "Scale" assert sX.shapes == [1, 4, 4, 2] - assert sX.attrs['axis'] == 3 + assert sX.attrs["axis"] == 3 def test_sigmoid_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Sigmoid')('sig1', [iX]) + X = xlf.get_xop_factory_func("Sigmoid")("sig1", [iX]) - assert X.type[0] == 'Sigmoid' + assert X.type[0] == "Sigmoid" assert X.shapes == [1, 2, 4, 4] assert X.sizes == [32] def test_softmax_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Softmax')('soft1', [iX]) + X = xlf.get_xop_factory_func("Softmax")("soft1", [iX]) - assert X.type[0] == 'Softmax' + assert X.type[0] == "Softmax" assert X.shapes == [1, 2, 4, 4] assert X.sizes == [32] def test_sqrt_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Sqrt')('sqrt1', [iX]) + X = xlf.get_xop_factory_func("Sqrt")("sqrt1", [iX]) - assert X.type[0] == 'Sqrt' + assert X.type[0] == "Sqrt" assert X.shapes == [1, 2, 4, 4] assert X.sizes == [32] def test_tanh_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Tanh')('tanh1', [iX]) + X = xlf.get_xop_factory_func("Tanh")("tanh1", [iX]) - assert X.type[0] == 'Tanh' + assert X.type[0] == "Tanh" assert X.shapes == [1, 2, 4, 4] assert X.sizes == [32] diff --git a/tests/unit/graph/ops/test_l2_convolution.py b/tests/unit/graph/ops/test_l2_convolution.py index fce1484..0599153 100644 --- a/tests/unit/graph/ops/test_l2_convolution.py +++ 
b/tests/unit/graph/ops/test_l2_convolution.py @@ -12,11 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -Module for testing the XOp factory and property functionality - - -""" +"""Module for testing the XOp factory and property functionality""" import unittest import numpy as np @@ -26,24 +22,25 @@ from pyxir.graph.layer import xlayer_factory as xlf from pyxir.graph import ops +from pyxir.graph.ops.l2_convolution import conv2d_layout_transform -class TestL2Convolution(unittest.TestCase): +class TestL2Convolution(unittest.TestCase): def test_nn_batch_flatten_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 1, 1, 4], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.batch_flatten('flatten1', [iX]) + sX = px.ops.batch_flatten("flatten1", [iX]) - assert sX.type[0] == 'Flatten' + assert sX.type[0] == "Flatten" assert sX.shapes == [1, 4] assert sX.sizes == [4] assert sX.attrs == {} @@ -51,489 +48,473 @@ def test_nn_batch_flatten_layer(self): def test_batchnorm_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) mX = XLayer( - type=['Constant'], - name='mu', + type=["Constant"], + name="mu", shapes=[2], sizes=[2], - data=[np.array([.5, 1.])], + data=[np.array([0.5, 1.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) sqX = XLayer( - type=['Constant'], - name='sigma_square', + type=["Constant"], + name="sigma_square", shapes=[2], sizes=[2], - data=[np.array([1., 2.])], + data=[np.array([1.0, 2.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) gX = XLayer( - type=['Constant'], - name='gamma', + type=["Constant"], + name="gamma", shapes=[2], sizes=[2], - data=[np.array([1., 2.])], + data=[np.array([1.0, 2.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) bX = XLayer( - type=['Constant'], - name='beta', + type=["Constant"], + name="beta", shapes=[2], sizes=[2], - data=[np.array([1., -2.])], + data=[np.array([1.0, -2.0])], bottoms=[], tops=[], - targets=[] + targets=[], ) - bX = px.ops.batch_norm('bn1', iX, mX, sqX, gX, bX, - axis=1, epsilon=1e-5) + bX = px.ops.batch_norm("bn1", iX, mX, sqX, gX, bX, axis=1, epsilon=1e-5) - assert bX.type[0] == 'BatchNorm' - assert bX.attrs['axis'] == 1 - assert bX.attrs['epsilon'] == 1e-5 + assert bX.type[0] == "BatchNorm" + assert bX.attrs["axis"] == 1 + assert bX.attrs["epsilon"] == 1e-5 - np.testing.assert_array_equal(bX.data.gamma, np.array([1., 2.])) - np.testing.assert_array_equal(bX.data.beta, np.array([1., -2.])) - np.testing.assert_array_equal(bX.data.mu, np.array([.5, 1.])) - np.testing.assert_array_equal( - bX.data.sigma_square, np.array([1., 2.])) + np.testing.assert_array_equal(bX.data.gamma, np.array([1.0, 2.0])) + np.testing.assert_array_equal(bX.data.beta, np.array([1.0, -2.0])) + np.testing.assert_array_equal(bX.data.mu, np.array([0.5, 1.0])) + np.testing.assert_array_equal(bX.data.sigma_square, np.array([1.0, 2.0])) - from pyxir.graph.ops.l2_convolution import \ - batchnorm_transpose_transform + from pyxir.graph.ops.l2_convolution import batchnorm_transpose_transform batchnorm_transpose_transform(bX, axes=[0, 2, 3, 1]) - assert bX.type[0] == 'BatchNorm' + assert bX.type[0] == "BatchNorm" assert bX.shapes == [1, 4, 4, 2] - assert bX.attrs['axis'] == 3 - assert bX.attrs['epsilon'] == 1e-5 - - def test_convolution_layer(self): - - iX = XLayer( - type=['Input'], - name='in1', 
- shapes=[1, 2, 3, 3], - sizes=[32], - bottoms=[], - tops=[], - targets=[] - ) - - kX = XLayer( - type=['Constant'], - name='kernel', - shapes=[4, 2, 3, 3], - sizes=[54], - data=[np.ones((4, 2, 3, 3), dtype=np.float32)], - bottoms=[], - tops=[], - targets=[] - ) - - X = xlf.get_xop_factory_func('Convolution')( - op_name='conv1', - kernel_size=[3, 3], - strides=[1, 1], - padding_hw=[1, 1], - dilation=[1, 1], - groups=1, - channels=4, - data_layout='NCHW', - kernel_layout='OIHW', + assert bX.attrs["axis"] == 3 + assert bX.attrs["epsilon"] == 1e-5 + + def conv2d_test_util( + self, + in_shape, + weight_shape, + out_shape, + padding=(0, 0), + strides=(1, 1), + dilation=(1, 1), + groups=1, + data_layout="NCHW", + kernel_layout="OIHW", + target_kernel_layout="OIHW", + ): + iX = px.ops.input("in", shape=list(in_shape)) + kX = px.ops.constant("kernel", np.ones(weight_shape, dtype=np.float32)) + kernel_size = [ + weight_shape[kernel_layout.index("H")], + weight_shape[kernel_layout.index("W")], + ] + channels = weight_shape[kernel_layout.index("O")] + X = px.ops.conv2d( + op_name="conv", input_layer=iX, - weights_layer=kX + weights_layer=kX, + kernel_size=kernel_size, + strides=list(strides), + padding_hw=list(padding), + dilation=list(dilation), + groups=groups, + channels=channels, + data_layout=data_layout, + kernel_layout=kernel_layout, + target_kernel_layout=target_kernel_layout, ) - assert X.type[0] == 'Convolution' - assert X.shapes == [1, 4, 3, 3] - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['shape'] == [1, 4, 3, 3] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['groups'] == 1 - assert X.attrs['dilation'] == [1, 1] - assert X.attrs['channels'] == [2, 4] + layout_idx = tuple(["NCHW".index(e) for e in data_layout]) + padding_nchw = [ + [0, 0], + [0, 0], + [padding[0], padding[1]], + [padding[2], padding[3]], + ] + padding = [padding_nchw[e] for e in layout_idx] + + assert X.type[0] == "Convolution" + assert X.shapes == list( + out_shape + ), "Expected out shape: {0}, but got: {1}".format(out_shape, X.shapes) + assert ( + X.attrs["padding"] == padding + ), "Expected padding: {0}, but got: {1}".format(padding, X.attrs["padding"]) + assert X.attrs["data_layout"] == data_layout + assert X.attrs["kernel_layout"] == target_kernel_layout + assert X.attrs["shape"] == list(out_shape) + assert X.attrs["kernel_size"] == kernel_size + assert X.attrs["strides"] == list(strides) + assert X.attrs["groups"] == groups + assert X.attrs["dilation"] == list(dilation) + assert X.attrs["channels"] == [ + weight_shape[kernel_layout.index("I")], + weight_shape[kernel_layout.index("O")], + ] np.testing.assert_array_equal( - X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32)) + X.data.weights, np.ones(weight_shape, dtype=np.float32) + ) np.testing.assert_array_equal( - X.data.biases, np.zeros((4), dtype=np.float32)) - - from pyxir.graph.ops.l2_convolution import \ - conv2d_layout_transform - - conv2d_layout_transform(X, target_layout='NHWC') - - assert X.type[0] == 'Convolution' - assert X.shapes == [1, 3, 3, 4] - assert X.attrs['data_layout'] == 'NHWC' - assert X.attrs['padding'] == [[0, 0], [1, 1], [1, 1], [0, 0]] + X.data.biases, + np.zeros((weight_shape[kernel_layout.index("O")]), dtype=np.float32), + ) - def test_convolution_layer_tfl(self): + conv2d_layout_transform(X, target_layout="NHWC") + + layout_idx = tuple([data_layout.index(e) for e 
in "NHWC"]) + trans_layout_idx = tuple(["NCHW".index(e) for e in "NHWC"]) + trans_out_shape = [out_shape[e] for e in layout_idx] + assert X.type[0] == "Convolution" + assert ( + X.shapes == trans_out_shape + ), "Expected out shape: {0}, but got: {1}".format(trans_out_shape, X.shapes) + assert X.attrs["data_layout"] == "NHWC" + padding = [padding_nchw[e] for e in trans_layout_idx] + assert ( + X.attrs["padding"] == padding + ), "Expected padding: {0}, but got: {1}".format(padding, X.attrs["padding"]) - iX = XLayer( - type=['Input'], - name='in1', - shapes=[1, 3, 3, 2], - sizes=[32], - bottoms=[], - tops=[], - targets=[] + def test_convolution_layer(self): + self.conv2d_test_util( + (1, 2, 3, 3), + (4, 2, 3, 3), + (-1, 4, 3, 3), + padding=(1, 1, 1, 1), + strides=(1, 1), + dilation=(1, 1), + groups=1, + data_layout="NCHW", + kernel_layout="OIHW", ) - - kX = XLayer( - type=['Constant'], - name='kernel', - shapes=[4, 3, 3, 2], - sizes=[54], - data=[np.transpose(np.ones((4, 2, 3, 3), dtype=np.float32), (0, 2, 3, 1))], - bottoms=[], - tops=[], - targets=[] + self.conv2d_test_util( + (1, 3, 3, 2), + (4, 2, 3, 3), + (-1, 3, 3, 4), + padding=(1, 1, 1, 1), + strides=(1, 1), + dilation=(1, 1), + groups=1, + data_layout="NHWC", + kernel_layout="OIHW", ) - X = xlf.get_xop_factory_func('Convolution')( - op_name='conv1', - kernel_size=[3, 3], - strides=[1, 1], - padding_hw=[1, 1], - dilation=[1, 1], + def test_convolution_layer_tfl(self): + self.conv2d_test_util( + (1, 3, 3, 2), + (4, 3, 3, 2), + (-1, 3, 3, 4), + padding=(1, 1, 1, 1), + strides=(1, 1), + dilation=(1, 1), groups=1, - channels=4, - data_layout='NHWC', - kernel_layout='OHWI', - input_layer=iX, - weights_layer=kX + data_layout="NHWC", + kernel_layout="OHWI", + target_kernel_layout="OHWI", ) - assert X.type[0] == 'Convolution' - assert X.shapes == [1, 3, 3, 4] - assert X.attrs['padding'] == [[0, 0], [1, 1], [1, 1], [0, 0]] - assert X.attrs['data_layout'] == 'NHWC' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['shape'] == [1, 3, 3, 4] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['groups'] == 1 - assert X.attrs['dilation'] == [1, 1] - assert X.attrs['channels'] == [2, 4] - - np.testing.assert_array_equal( - X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32)) - np.testing.assert_array_equal( - X.data.biases, np.zeros((4), dtype=np.float32)) - - from pyxir.graph.ops.l2_convolution import \ - conv2d_layout_transform - - conv2d_layout_transform(X, target_layout='NCHW') - - assert X.type[0] == 'Convolution' - assert X.shapes == [1, 4, 3, 3] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - def test_depthwise_convolution_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 8, 3, 3], sizes=[72], bottoms=[], tops=[], - targets=[] + targets=[], ) kX = XLayer( - type=['Constant'], - name='kernel', + type=["Constant"], + name="kernel", shapes=[4, 2, 3, 3], sizes=[54], data=[np.ones((4, 2, 3, 3), dtype=np.float32)], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Convolution')( - op_name='conv1', + X = xlf.get_xop_factory_func("Convolution")( + op_name="conv1", kernel_size=[3, 3], strides=[1, 1], padding_hw=[1, 1], dilation=[1, 1], groups=4, channels=4, - data_layout='NCHW', - kernel_layout='OIHW', + data_layout="NCHW", + kernel_layout="OIHW", input_layer=iX, - weights_layer=kX + weights_layer=kX, ) - assert X.type[0] == 'Convolution' + assert 
X.type[0] == "Convolution" assert X.shapes == [1, 4, 3, 3] - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['shape'] == [1, 4, 3, 3] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['groups'] == 4 - assert X.attrs['dilation'] == [1, 1] - assert X.attrs['channels'] == [8, 4] + assert X.attrs["padding"] == [[0, 0], [0, 0], [1, 1], [1, 1]] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["kernel_layout"] == "OIHW" + assert X.attrs["shape"] == [1, 4, 3, 3] + assert X.attrs["kernel_size"] == [3, 3] + assert X.attrs["strides"] == [1, 1] + assert X.attrs["groups"] == 4 + assert X.attrs["dilation"] == [1, 1] + assert X.attrs["channels"] == [8, 4] np.testing.assert_array_equal( - X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32)) - np.testing.assert_array_equal( - X.data.biases, np.zeros((4), dtype=np.float32)) + X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32) + ) + np.testing.assert_array_equal(X.data.biases, np.zeros((4), dtype=np.float32)) - from pyxir.graph.ops.l2_convolution import \ - conv2d_layout_transform + from pyxir.graph.ops.l2_convolution import conv2d_layout_transform - conv2d_layout_transform(X, target_layout='NHWC') + conv2d_layout_transform(X, target_layout="NHWC") - assert X.type[0] == 'Convolution' + assert X.type[0] == "Convolution" assert X.shapes == [1, 3, 3, 4] - assert X.attrs['data_layout'] == 'NHWC' - assert X.attrs['padding'] == [[0, 0], [1, 1], [1, 1], [0, 0]] + assert X.attrs["data_layout"] == "NHWC" + assert X.attrs["padding"] == [[0, 0], [1, 1], [1, 1], [0, 0]] def test_conv2d_transpose_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 3, 3], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) kX = XLayer( - type=['Constant'], - name='kernel', + type=["Constant"], + name="kernel", shapes=[4, 2, 3, 3], sizes=[54], data=[np.ones((4, 2, 3, 3), dtype=np.float32)], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Conv2DTranspose')( - op_name='conv1', + X = xlf.get_xop_factory_func("Conv2DTranspose")( + op_name="conv1", kernel_size=[3, 3], strides=[1, 1], padding_hw=[0, 0], dilation=[1, 1], groups=1, channels=4, - data_layout='NCHW', - kernel_layout='OIHW', + data_layout="NCHW", + kernel_layout="OIHW", input_layer=iX, - weights_layer=kX + weights_layer=kX, ) - assert X.type[0] == 'Conv2DTranspose' + assert X.type[0] == "Conv2DTranspose" assert X.shapes == [1, 4, 5, 5] - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['kernel_layout'] == 'OIHW' - assert X.attrs['shape'] == [1, 4, 5, 5] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['strides'] == [1, 1] - assert X.attrs['groups'] == 1 - assert X.attrs['dilation'] == [1, 1] - assert X.attrs['channels'] == [2, 4] + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["kernel_layout"] == "OIHW" + assert X.attrs["shape"] == [1, 4, 5, 5] + assert X.attrs["kernel_size"] == [3, 3] + assert X.attrs["strides"] == [1, 1] + assert X.attrs["groups"] == 1 + assert X.attrs["dilation"] == [1, 1] + assert X.attrs["channels"] == [2, 4] np.testing.assert_array_equal( - X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32)) - np.testing.assert_array_equal( - X.data.biases, np.zeros((4), dtype=np.float32)) + 
X.data.weights, np.ones((4, 2, 3, 3), dtype=np.float32) + ) + np.testing.assert_array_equal(X.data.biases, np.zeros((4), dtype=np.float32)) - from pyxir.graph.ops.l2_convolution import \ - conv2d_transpose_layout_transform + from pyxir.graph.ops.l2_convolution import conv2d_transpose_layout_transform - conv2d_transpose_layout_transform(X, target_layout='NHWC') + conv2d_transpose_layout_transform(X, target_layout="NHWC") - assert X.type[0] == 'Conv2DTranspose' + assert X.type[0] == "Conv2DTranspose" assert X.shapes == [1, 5, 5, 4] - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['data_layout'] == 'NHWC' + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["data_layout"] == "NHWC" def test_global_pooling_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 7, 7], sizes=[98], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('GlobalPooling')( - op_name='gp1', - pool_type='Max', - layout='NCHW', - input_layer=iX + X = xlf.get_xop_factory_func("GlobalPooling")( + op_name="gp1", pool_type="Max", layout="NCHW", input_layer=iX ) - assert X.type[0] == 'Pooling' + assert X.type[0] == "Pooling" assert X.shapes == [1, 2, 1, 1] - assert X.attrs['padding'] == [[0, 0], [0, 0], [0, 0], [0, 0]] - assert X.attrs['insize'] == [7, 7] - assert X.attrs['outsize'] == [1, 1] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [1, 1] - assert X.attrs['kernel_size'] == [7, 7] - assert X.attrs['pool_type'] == 'Max' + assert X.attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]] + assert X.attrs["insize"] == [7, 7] + assert X.attrs["outsize"] == [1, 1] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [1, 1] + assert X.attrs["kernel_size"] == [7, 7] + assert X.attrs["pool_type"] == "Max" - from pyxir.graph.ops.l2_convolution import \ - pooling_layout_transform + from pyxir.graph.ops.l2_convolution import pooling_layout_transform - pooling_layout_transform(X, target_layout='NHWC') + pooling_layout_transform(X, target_layout="NHWC") - assert X.type[0] == 'Pooling' + assert X.type[0] == "Pooling" assert X.shapes == [1, 1, 1, 2] - assert X.attrs['data_layout'] == 'NHWC' + assert X.attrs["data_layout"] == "NHWC" def test_pad_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 7, 7], sizes=[98], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = xlf.get_xop_factory_func('Pad')( - op_name='pad1', + X = xlf.get_xop_factory_func("Pad")( + op_name="pad1", padding=[[0, 0], [0, 0], [1, 0], [1, 0]], pad_value=0, - input_layer=iX + input_layer=iX, ) - assert X.type[0] == 'Pad' + assert X.type[0] == "Pad" assert X.shapes == [1, 2, 8, 8] assert X.sizes == [128] - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 0], [1, 0]] + assert X.attrs["padding"] == [[0, 0], [0, 0], [1, 0], [1, 0]] - from pyxir.graph.ops.l2_convolution import \ - padding_transpose_transform + from pyxir.graph.ops.l2_convolution import padding_transpose_transform padding_transpose_transform(X, axes=(0, 2, 3, 1)) - assert X.type[0] == 'Pad' + assert X.type[0] == "Pad" assert X.shapes == [1, 8, 8, 2] - assert X.attrs['padding'] == [[0, 0], [1, 0], [1, 0], [0, 0]] + assert X.attrs["padding"] == [[0, 0], [1, 0], [1, 0], [0, 0]] def test_pooling_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 5, 5], sizes=[50], bottoms=[], tops=[], - targets=[] + targets=[], ) - X = 
xlf.get_xop_factory_func('Pooling')( - op_name='pool1', + X = xlf.get_xop_factory_func("Pooling")( + op_name="pool1", input_layer=iX, - pool_type='Avg', + pool_type="Avg", pool_size=[3, 3], strides=[2, 2], padding=[1, 1], - layout='NCHW', + layout="NCHW", ceil_mode=True, - count_include_pad=True + count_include_pad=True, ) - assert X.type[0] == 'Pooling' + assert X.type[0] == "Pooling" assert X.shapes == [1, 2, 3, 3] - assert X.attrs['padding'] == [[0, 0], [0, 0], [1, 1], [1, 1]] - assert X.attrs['insize'] == [5, 5] - assert X.attrs['outsize'] == [3, 3] - assert X.attrs['data_layout'] == 'NCHW' - assert X.attrs['strides'] == [2, 2] - assert X.attrs['kernel_size'] == [3, 3] - assert X.attrs['pool_type'] == 'Avg' + assert X.attrs["padding"] == [[0, 0], [0, 0], [1, 1], [1, 1]] + assert X.attrs["insize"] == [5, 5] + assert X.attrs["outsize"] == [3, 3] + assert X.attrs["data_layout"] == "NCHW" + assert X.attrs["strides"] == [2, 2] + assert X.attrs["kernel_size"] == [3, 3] + assert X.attrs["pool_type"] == "Avg" - from pyxir.graph.ops.l2_convolution import \ - pooling_layout_transform + from pyxir.graph.ops.l2_convolution import pooling_layout_transform - pooling_layout_transform(X, target_layout='NHWC') + pooling_layout_transform(X, target_layout="NHWC") - assert X.type[0] == 'Pooling' + assert X.type[0] == "Pooling" assert X.shapes == [1, 3, 3, 2] - assert X.attrs['padding'] == [[0, 0], [1, 1], [1, 1], [0, 0]] - assert X.attrs['data_layout'] == 'NHWC' + assert X.attrs["padding"] == [[0, 0], [1, 1], [1, 1], [0, 0]] + assert X.attrs["data_layout"] == "NHWC" def test_nn_upsampling2d(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 4, 2, 2], sizes=[16], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = xlf.get_xop_factory_func('Upsampling2D')( - 'ups1', + sX = xlf.get_xop_factory_func("Upsampling2D")( + "ups1", [iX], scale_h=3, scale_w=2, - data_layout='NCHW', - method='nearest_neighbor', - align_corners=False + data_layout="NCHW", + method="nearest_neighbor", + align_corners=False, ) - assert sX.type[0] == 'Upsampling2D' + assert sX.type[0] == "Upsampling2D" assert sX.shapes == [1, 4, 6, 4] assert sX.sizes == [96] - assert sX.attrs['scale_h'] == 3 - assert sX.attrs['scale_w'] == 2 - assert sX.attrs['data_layout'] == 'NCHW' - assert sX.attrs['method'] == 'nearest_neighbor' - assert sX.attrs['align_corners'] is False + assert sX.attrs["scale_h"] == 3 + assert sX.attrs["scale_w"] == 2 + assert sX.attrs["data_layout"] == "NCHW" + assert sX.attrs["method"] == "nearest_neighbor" + assert sX.attrs["align_corners"] is False - from pyxir.graph.ops.l2_convolution import \ - upsampling2d_layout_transform + from pyxir.graph.ops.l2_convolution import upsampling2d_layout_transform - upsampling2d_layout_transform(sX, target_layout='NHWC') + upsampling2d_layout_transform(sX, target_layout="NHWC") - assert sX.type[0] == 'Upsampling2D' + assert sX.type[0] == "Upsampling2D" assert sX.shapes == [1, 6, 4, 4] - assert sX.attrs['data_layout'] == 'NHWC' + assert sX.attrs["data_layout"] == "NHWC" diff --git a/tests/unit/graph/ops/test_l3_math_and_transformations.py b/tests/unit/graph/ops/test_l3_math_and_transformations.py index b1872f4..752efdc 100644 --- a/tests/unit/graph/ops/test_l3_math_and_transformations.py +++ b/tests/unit/graph/ops/test_l3_math_and_transformations.py @@ -29,277 +29,284 @@ class TestL3MathAndTransformations(unittest.TestCase): - def test_clip_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 
4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.clip('clip', iX, a_min=0., a_max=10.) + sX = px.ops.clip("clip", iX, a_min=0.0, a_max=10.0) - assert sX.type[0] == 'Clip' + assert sX.type[0] == "Clip" assert sX.shapes == [1, 2, 4, 4] assert sX.sizes == [32] - assert sX.attrs['a_min'] == 0. - assert sX.attrs['a_max'] == 10. - assert sX.bottoms == ['in1'] + assert sX.attrs["a_min"] == 0.0 + assert sX.attrs["a_max"] == 10.0 + assert sX.bottoms == ["in1"] - from pyxir.graph.ops.l3_math_and_transformations import \ - clip_transpose_transform + from pyxir.graph.ops.l3_math_and_transformations import clip_transpose_transform clip_transpose_transform(sX, (0, 2, 3, 1)) - assert sX.type[0] == 'Clip' + assert sX.type[0] == "Clip" assert sX.shapes == [1, 4, 4, 2] assert sX.sizes == [32] def test_relu6_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.clip('clip1', iX, a_min=0., a_max=6.) + sX = px.ops.clip("clip1", iX, a_min=0.0, a_max=6.0) - assert sX.type[0] == 'ReLU6' + assert sX.type[0] == "ReLU6" assert sX.shapes == [1, 2, 4, 4] assert sX.sizes == [32] assert sX.attrs == {} - assert sX.bottoms == ['in1'] + assert sX.bottoms == ["in1"] - from pyxir.graph.ops.l3_math_and_transformations import \ - relu6_transpose_transform + from pyxir.graph.ops.l3_math_and_transformations import ( + relu6_transpose_transform, + ) relu6_transpose_transform(sX, (0, 2, 3, 1)) - assert sX.type[0] == 'ReLU6' + assert sX.type[0] == "ReLU6" assert sX.shapes == [1, 4, 4, 2] assert sX.sizes == [32] def test_leaky_relu_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.leaky_relu('leaky_relu', [iX], alpha=0.1) + sX = px.ops.leaky_relu("leaky_relu", [iX], alpha=0.1) - assert sX.type[0] == 'LeakyReLU' + assert sX.type[0] == "LeakyReLU" assert sX.shapes == [1, 2, 4, 4] assert sX.sizes == [32] - assert sX.attrs == {'alpha': 0.1} - assert sX.bottoms == ['in1'] + assert sX.attrs == {"alpha": 0.1} + assert sX.bottoms == ["in1"] - from pyxir.graph.ops.l3_math_and_transformations import \ - leaky_relu_transpose_transform + from pyxir.graph.ops.l3_math_and_transformations import ( + leaky_relu_transpose_transform, + ) leaky_relu_transpose_transform(sX, (0, 2, 3, 1)) - assert sX.type[0] == 'LeakyReLU' + assert sX.type[0] == "LeakyReLU" assert sX.shapes == [1, 4, 4, 2] assert sX.sizes == [32] def test_nn_prelu_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.prelu('prelu1', iX, alpha=0.2, axis=1) + sX = px.ops.prelu("prelu1", iX, alpha=0.2, axis=1) - assert sX.type[0] == 'pReLU' + assert sX.type[0] == "pReLU" assert sX.shapes == [1, 2, 4, 4] assert sX.sizes == [32] - assert sX.attrs['alpha'] == 0.2 - assert sX.bottoms == ['in1'] + assert sX.attrs["alpha"] == 0.2 + assert sX.bottoms == ["in1"] - from pyxir.graph.ops.l3_math_and_transformations import \ - prelu_transpose_transform + from pyxir.graph.ops.l3_math_and_transformations import ( + prelu_transpose_transform, + ) prelu_transpose_transform(sX, (0, 2, 3, 1)) - assert sX.type[0] == 'pReLU' + assert sX.type[0] == "pReLU" assert sX.shapes == [1, 4, 4, 2] assert sX.sizes == [32] - assert sX.attrs['alpha'] == 0.2 + assert sX.attrs["alpha"] == 0.2 
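+
+    # Editorial aside (illustrative sketch, not part of the original change):
+    # the *_transpose_transform helpers exercised above all encode the same
+    # convention that these assertions check -- given a permutation `axes`,
+    # the layer shape is permuted and any axis-valued attribute is remapped
+    # through the permutation (the BatchNorm test earlier checks axis 1 -> 3
+    # for axes [0, 2, 3, 1]). Roughly:
+    #
+    #     new_shape = [old_shape[i] for i in axes]   # [1, 2, 4, 4] -> [1, 4, 4, 2]
+    #     new_axis = axes.index(old_axis)            # 1 -> 3 for axes (0, 2, 3, 1)
+    #
+    # (sketch only; the exact behaviour lives in the pyxir.graph.ops modules).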
def test_reshape_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 4, 1, 1], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.reshape('reshape1', iX, newshape=[1, 4]) + sX = px.ops.reshape("reshape1", iX, newshape=[1, 4]) - assert sX.type[0] == 'Reshape' + assert sX.type[0] == "Reshape" assert sX.shapes == [1, 4] assert sX.sizes == [4] - assert sX.attrs['shape'] == [1, 4] - assert sX.bottoms == ['in1'] + assert sX.attrs["shape"] == [1, 4] + assert sX.bottoms == ["in1"] def test_split_layer_int(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 6, 4, 4], sizes=[96], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.split('split1', [iX], axis=1, indices=3) + sX = px.ops.split("split1", [iX], axis=1, indices=3) - assert sX.type[0] == 'Split' - assert sX.shapes == TupleShape([TensorShape([1, 2, 4, 4]), - TensorShape([1, 2, 4, 4]), - TensorShape([1, 2, 4, 4])]) + assert sX.type[0] == "Split" + assert sX.shapes == TupleShape( + [ + TensorShape([1, 2, 4, 4]), + TensorShape([1, 2, 4, 4]), + TensorShape([1, 2, 4, 4]), + ] + ) assert sX.sizes == [32, 32, 32] - assert sX.attrs['axis'] == 1 - assert sX.attrs['indices'] == 3 - assert sX.bottoms == ['in1'] + assert sX.attrs["axis"] == 1 + assert sX.attrs["indices"] == 3 + assert sX.bottoms == ["in1"] def test_split_layer_tuple(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 5, 4, 4], sizes=[80], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.split('split1', [iX], axis=1, indices=[1, 4]) + sX = px.ops.split("split1", [iX], axis=1, indices=[1, 4]) - assert sX.type[0] == 'Split' - assert sX.shapes == TupleShape([TensorShape([1, 1, 4, 4]), - TensorShape([1, 3, 4, 4]), - TensorShape([1, 1, 4, 4])]) + assert sX.type[0] == "Split" + assert sX.shapes == TupleShape( + [ + TensorShape([1, 1, 4, 4]), + TensorShape([1, 3, 4, 4]), + TensorShape([1, 1, 4, 4]), + ] + ) assert sX.sizes == [16, 48, 16] - assert sX.attrs['axis'] == 1 - assert sX.attrs['indices'] == (1, 4) - assert sX.bottoms == ['in1'] + assert sX.attrs["axis"] == 1 + assert sX.attrs["indices"] == (1, 4) + assert sX.bottoms == ["in1"] def test_squeeze_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 4, 1, 1], sizes=[4], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.squeeze('squeeze1', iX, axis=[2, 3]) + sX = px.ops.squeeze("squeeze1", iX, axis=[2, 3]) - assert sX.type[0] == 'Squeeze' + assert sX.type[0] == "Squeeze" assert sX.shapes == [1, 4] assert sX.sizes == [4] - assert sX.attrs['axis'] == [2, 3] - assert sX.bottoms == ['in1'] + assert sX.attrs["axis"] == [2, 3] + assert sX.bottoms == ["in1"] def test_take_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 3, 4, 4], sizes=[48], bottoms=[], tops=[], - targets=[] + targets=[], ) indX1 = XLayer( - type=['Constant'], - name='indices', + type=["Constant"], + name="indices", shapes=[], sizes=[], data=[np.array(0, dtype=np.int32)], bottoms=[], tops=[], - targets=[] + targets=[], ) - tX = xlf.get_xop_factory_func('Take')('take1', [iX, indX1], - axis=1, - mode='clip') + tX = xlf.get_xop_factory_func("Take")("take1", [iX, indX1], axis=1, mode="clip") - assert tX.type[0] == 'Take' - assert tX.attrs['axis'] == 1 - assert tX.attrs['mode'] == 'clip' - assert tX.bottoms == ['in1', 'indices'] + assert tX.type[0] == "Take" + assert tX.attrs["axis"] == 1 
+ assert tX.attrs["mode"] == "clip" + assert tX.bottoms == ["in1", "indices"] assert tX.shapes == [1, 4, 4] assert tX.sizes == [16] indX2 = XLayer( - type=['Constant'], - name='indices', + type=["Constant"], + name="indices", shapes=[2], sizes=[2], data=[np.array([0, 2], dtype=np.int32)], bottoms=[], tops=[], - targets=[] + targets=[], ) - tX = px.ops.take('take2', [iX, indX2], axis=1, mode='clip') + tX = px.ops.take("take2", [iX, indX2], axis=1, mode="clip") - assert tX.type[0] == 'Take' - assert tX.attrs['axis'] == 1 - assert tX.attrs['mode'] == 'clip' - assert tX.bottoms == ['in1', 'indices'] + assert tX.type[0] == "Take" + assert tX.attrs["axis"] == 1 + assert tX.attrs["mode"] == "clip" + assert tX.bottoms == ["in1", "indices"] assert tX.shapes == [1, 2, 4, 4] assert tX.sizes == [32] def test_transpose_layer(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = px.ops.transpose('t1', iX, axes=[0, 2, 3, 1]) + sX = px.ops.transpose("t1", iX, axes=[0, 2, 3, 1]) - assert sX.type[0] == 'Transpose' + assert sX.type[0] == "Transpose" assert sX.shapes == [1, 4, 4, 2] assert sX.sizes == [32] - assert sX.attrs['axes'] == [0, 2, 3, 1] - assert sX.bottoms == ['in1'] + assert sX.attrs["axes"] == [0, 2, 3, 1] + assert sX.bottoms == ["in1"] diff --git a/tests/unit/graph/ops/test_l4_broadcast_and_reductions.py b/tests/unit/graph/ops/test_l4_broadcast_and_reductions.py index a58bd00..d82f3e3 100644 --- a/tests/unit/graph/ops/test_l4_broadcast_and_reductions.py +++ b/tests/unit/graph/ops/test_l4_broadcast_and_reductions.py @@ -29,82 +29,79 @@ class TestL4BroadcastAndReductions(unittest.TestCase): - def test_mean_layer_basic(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) # sX = xlf.get_xop_factory_func('Mean')( # 'mean', iX, [2, 3], True, False) - sX = px.ops.mean('mean', iX, axes=[2, 3], keepdims=True, exclude=False) + sX = px.ops.mean("mean", iX, axes=[2, 3], keepdims=True, exclude=False) - assert sX.type[0] == 'Mean' + assert sX.type[0] == "Mean" assert sX.shapes.tolist() == [1, 2, 1, 1] assert sX.sizes == [2] - assert sX.attrs['axes'] == [2, 3] - assert sX.attrs['keepdims'] is True - assert sX.bottoms == ['in1'] + assert sX.attrs["axes"] == [2, 3] + assert sX.attrs["keepdims"] is True + assert sX.bottoms == ["in1"] - from pyxir.graph.ops.l4_broadcast_and_reductions import \ - mean_transpose_transform + from pyxir.graph.ops.l4_broadcast_and_reductions import mean_transpose_transform mean_transpose_transform(sX, (0, 2, 3, 1)) - assert sX.type[0] == 'Mean' + assert sX.type[0] == "Mean" assert sX.shapes == [1, 1, 1, 2] - assert sX.attrs['axes'] == [1, 2] + assert sX.attrs["axes"] == [1, 2] def test_mean_layer_keepdims(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) # sX = xlf.get_xop_factory_func('Mean')( # 'mean', [2, 3], False, False, iX) - sX = px.ops.mean('mean', iX, axes=[2, 3], keepdims=False, - exclude=False) + sX = px.ops.mean("mean", iX, axes=[2, 3], keepdims=False, exclude=False) - assert sX.type[0] == 'Mean' + assert sX.type[0] == "Mean" assert sX.shapes.tolist() == [1, 2] assert sX.sizes == [2] - assert sX.attrs['axes'] == [2, 3] - assert sX.attrs['keepdims'] is False - assert sX.bottoms == ['in1'] + assert sX.attrs["axes"] == [2, 3] + 
assert sX.attrs["keepdims"] is False + assert sX.bottoms == ["in1"] def test_mean_layer_exclude(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) # sX = xlf.get_xop_factory_func('Mean')( # 'mean', [0, 1], False, True, iX) - sX = px.ops.mean('mean', iX, axes=[0, 1], keepdims=False, exclude=True) + sX = px.ops.mean("mean", iX, axes=[0, 1], keepdims=False, exclude=True) - assert sX.type[0] == 'Mean' + assert sX.type[0] == "Mean" assert sX.shapes.tolist() == [1, 2] assert sX.sizes == [2] - assert sX.attrs['axes'] == [2, 3] - assert sX.attrs['keepdims'] is False - assert sX.bottoms == ['in1'] + assert sX.attrs["axes"] == [2, 3] + assert sX.attrs["keepdims"] is False + assert sX.bottoms == ["in1"] diff --git a/tests/unit/graph/ops/test_l5_vision.py b/tests/unit/graph/ops/test_l5_vision.py index 81a6cbd..ef51f44 100644 --- a/tests/unit/graph/ops/test_l5_vision.py +++ b/tests/unit/graph/ops/test_l5_vision.py @@ -27,46 +27,44 @@ class TestL5Vision(unittest.TestCase): - def test_cvx_input_nchw(self): iX = XLayer( - type=['StrInput'], - name='in1', + type=["StrInput"], + name="in1", shapes=[-1], sizes=[1], bottoms=[], tops=[], - targets=[] + targets=[], ) cvx_key = "scale-0.5__transpose-2,0,1" - cX = xlf.get_xop_factory_func('Cvx')('cvx1', iX, cvx_key, - [-1, 3, 10, 10], - 'float32') + cX = xlf.get_xop_factory_func("Cvx")( + "cvx1", iX, cvx_key, [-1, 3, 10, 10], "float32" + ) - assert cX.type[0] == 'Cvx' - assert cX.attrs['cvx_key'] == "scale-0.5__transpose-2,0,1" + assert cX.type[0] == "Cvx" + assert cX.attrs["cvx_key"] == "scale-0.5__transpose-2,0,1" assert cX.shapes == [-1, 3, 10, 10] def test_yolo_reorg(self): iX = XLayer( - type=['Input'], - name='in1', + type=["Input"], + name="in1", shapes=[1, 2, 4, 4], sizes=[32], bottoms=[], tops=[], - targets=[] + targets=[], ) - sX = xlf.get_xop_factory_func('YoloReorg')( - 'yr1', iX, 2, 'NCHW') + sX = xlf.get_xop_factory_func("YoloReorg")("yr1", iX, 2, "NCHW") - assert sX.type[0] == 'YoloReorg' + assert sX.type[0] == "YoloReorg" assert sX.shapes == [1, 8, 2, 2] assert sX.sizes == [32] - assert sX.attrs['stride'] == 2 - assert sX.attrs['layout'] == 'NCHW' - assert sX.bottoms == ['in1'] + assert sX.attrs["stride"] == 2 + assert sX.attrs["layout"] == "NCHW" + assert sX.bottoms == ["in1"] diff --git a/tests/unit/runtime/tensorflow_rt/ops/test_tf_l2_convolutions.py b/tests/unit/runtime/tensorflow_rt/ops/test_tf_l2_convolutions.py index 25e4e81..cac1b4a 100644 --- a/tests/unit/runtime/tensorflow_rt/ops/test_tf_l2_convolutions.py +++ b/tests/unit/runtime/tensorflow_rt/ops/test_tf_l2_convolutions.py @@ -35,64 +35,54 @@ class TestTfL2Convolutions(unittest.TestCase): - def test_conv2d(self): + def create_conv2d_layers(self, in_name, in_shape, weights, strides, padding, dilation, + groups, data_layout="NCHW", kernel_layout="OIHW", + target_kernel_layout="OIHW"): tf.compat.v1.reset_default_graph() - K = np.reshape(np.array([[[1, 2], [3, 4]], - [[5, 6], [7, 8]]], - dtype=np.float32), - (2, 1, 2, 2)) - B = np.array([0, 0], dtype=np.float32) - - X = xlayer.XLayer( - name='test_conv2d', - type=['Convolution'], - shapes=[1, 2, 3, 3], - sizes=[18], - bottoms=['input'], - tops=[], - data=xlayer.ConvData(K, B), - attrs={ - 'data_layout': 'NCHW', - 'kernel_layout': 'OIHW', - 'padding': [[0, 0], [0, 0], [0, 0], [0, 0]], - 'strides': [1, 1], - 'dilation': [1, 1], - 'groups': 1 - }, - targets=[] - ) - - input_shapes = { - 'input': TensorShape([1, 1, 4, 4]) - } - 
inputs = { - 'input': np.ones((1, 1, 4, 4), dtype=np.float32) - } + in_layer = px.ops.input("in", shape=in_shape[:]) + weights_layer = px.ops.constant("w", weights) + kernel_size = [weights_layer.shapes[kernel_layout.index("H")], + weights_layer.shapes[kernel_layout.index("W")]] + channels = weights_layer.shapes[kernel_layout.index("O")] + X = px.ops.conv2d("conv", in_layer, weights_layer, kernel_size, strides, + padding, dilation, groups, channels, data_layout, kernel_layout, + target_kernel_layout) + + input_shapes = {in_name: TensorShape(in_shape)} params = { - 'test_conv2d_kernel': np.reshape(np.array([[[1, 2], [3, 4]], - [[5, 6], [7, 8]]], - dtype=np.float32), - (2, 1, 2, 2)), - 'test_conv2d_biases': np.array([0, 0], dtype=np.float32) + 'conv_kernel': weights, + 'conv_biases': np.array([0, 0], dtype=np.float32) } - layers = base.get_conv2d_layer(ConvLayer, - ConstantLayer)( - X, input_shapes, params) - assert(len(layers) == 3) + layers = base.get_conv2d_layer(ConvLayer, ConstantLayer)(X, input_shapes, params) + return layers - inputs.update(params) + def execute_layers(self, layers, inputs): for layer in layers: - - # print("-----------------------") - # print("Run layer: {}".format(layer.name)) - inpts = [inputs[name] for name in layer.inputs] - outpt = layer.forward_exec(inpts) - - # print("Output:", outpt.shape, outpt) - - inputs[layer.name] = outpt + out = layer.forward_exec(inpts) + inputs[layer.name] = out + return out + def test_conv2d_basic(self): + K = np.reshape(np.array([[[1, 2], [3, 4]], + [[5, 6], [7, 8]]], + dtype=np.float32), + (2, 1, 2, 2)) + + in_shape = [1, 1, 4, 4] + layers = self.create_conv2d_layers( + in_name="in", + in_shape=in_shape, + weights=K, + strides=[1, 1], + padding=[0, 0, 0, 0], + dilation=[1, 1], + groups=1 + ) + assert len(layers) == 3 + inputs = {"in": np.ones(in_shape, dtype=np.float32)} + + out = self.execute_layers(layers, inputs) expected_outpt = np.array([[[[10., 10., 10.], [10., 10., 10.], [10., 10., 10.]], @@ -100,7 +90,34 @@ def test_conv2d(self): [26., 26., 26.], [26., 26., 26.]]]]) - np.testing.assert_array_equal(outpt, expected_outpt) + np.testing.assert_array_equal(out, expected_outpt) + + def test_conv2d_dilation(self): + K = np.reshape(np.array([[[1, 2], [3, 4]], + [[5, 6], [7, 8]]], + dtype=np.float32), + (2, 1, 2, 2)) + + in_shape = [1, 1, 4, 4] + layers = self.create_conv2d_layers( + in_name="in", + in_shape=in_shape, + weights=K, + strides=[1, 1], + padding=[0, 0, 0, 0], + dilation=[2, 2], + groups=1 + ) + assert len(layers) == 3 + inputs = {"in": np.ones(in_shape, dtype=np.float32)} + + out = self.execute_layers(layers, inputs) + expected_outpt = np.array([[[[10., 10.], + [10., 10.]], + [[26., 26.], + [26., 26.]]]]) + + np.testing.assert_array_equal(out, expected_outpt) def test_conv2d_tfl(self): tf.compat.v1.reset_default_graph() @@ -109,58 +126,24 @@ def test_conv2d_tfl(self): dtype=np.float32), (2, 1, 2, 2)), (0, 2, 3, 1)) - B = np.array([0, 0], dtype=np.float32) - X = xlayer.XLayer( - name='test_conv2d_tfl', - type=['Convolution'], - shapes=[1, 3, 3, 2], - sizes=[18], - bottoms=['input'], - tops=[], - data=xlayer.ConvData(K, B), - attrs={ - 'data_layout': 'NHWC', - 'kernel_layout': 'OHWI', - 'padding': [[0, 0], [0, 0], [0, 0], [0, 0]], - 'strides': [1, 1], - 'dilation': [1, 1], - 'groups': 1 - }, - targets=[] + in_shape = [1, 4, 4, 1] + layers = self.create_conv2d_layers( + in_name="in", + in_shape=in_shape, + weights=K, + strides=[1, 1], + padding=[0, 0, 0, 0], + dilation=[1, 1], + groups=1, + data_layout="NHWC", + 
kernel_layout="OHWI", + target_kernel_layout="OHWI" ) - - input_shapes = { - 'input': TensorShape([1, 4, 4, 1]) - } - inputs = { - 'input': np.transpose(np.ones((1, 1, 4, 4), dtype=np.float32), (0, 2, 3, 1)) - } - params = { - 'test_conv2d_tfl_kernel': np.transpose(np.reshape(np.array([[[1, 2], [3, 4]], - [[5, 6], [7, 8]]], - dtype=np.float32), - (2, 1, 2, 2)), - (0, 2, 3, 1)), - 'test_conv2d_tfl_biases': np.array([0, 0], dtype=np.float32) - } - layers = base.get_conv2d_layer(ConvLayer, - ConstantLayer)( - X, input_shapes, params) - assert(len(layers) == 3) - - inputs.update(params) - for layer in layers: - - # print("-----------------------") - # print("Run layer: {}".format(layer.name)) - - inpts = [inputs[name] for name in layer.inputs] - outpt = layer.forward_exec(inpts) - - # print("Output:", outpt.shape, outpt) - - inputs[layer.name] = outpt + assert len(layers) == 3 + inputs = {"in": np.ones(in_shape, dtype=np.float32)} + + out = self.execute_layers(layers, inputs) expected_outpt = np.transpose(np.array([[[[10., 10., 10.], [10., 10., 10.], @@ -170,7 +153,7 @@ def test_conv2d_tfl(self): [26., 26., 26.]]]]), (0, 2, 3, 1)) - np.testing.assert_array_equal(outpt, expected_outpt) + np.testing.assert_array_equal(out, expected_outpt) def test_conv2d_transpose(self): tf.compat.v1.reset_default_graph()