From 6b82771343ba991dea0a6398cd68a13bf1324b7d Mon Sep 17 00:00:00 2001
From: Jeremy Sadler <53983960+jezsadler@users.noreply.github.com>
Date: Fri, 3 Nov 2023 18:51:07 +0000
Subject: [PATCH] Fixing yet more lint issues

---
 src/omlt/gbt/gbt_formulation.py              |  12 +-
 src/omlt/gbt/model.py                        |  14 +-
 src/omlt/io/onnx_parser.py                   | 130 ++++++++++++++-----
 src/omlt/neuralnet/layer.py                  |  20 ++-
 src/omlt/neuralnet/layers/full_space.py      |  11 +-
 src/omlt/neuralnet/layers/partition_based.py |   4 +-
 src/omlt/neuralnet/layers/reduced_space.py   |   4 +-
 tests/io/test_onnx_parser.py                 |  49 +++----
 tests/neuralnet/test_network_definition.py   |  13 +-
 tests/neuralnet/test_nn_formulation.py       |  92 ++++++++-----
 10 files changed, 236 insertions(+), 113 deletions(-)

diff --git a/src/omlt/gbt/gbt_formulation.py b/src/omlt/gbt/gbt_formulation.py
index 3c2ca27d..a3d9e878 100644
--- a/src/omlt/gbt/gbt_formulation.py
+++ b/src/omlt/gbt/gbt_formulation.py
@@ -198,16 +198,22 @@ def _branching_y(tree_id, branch_node_id):
         feature_id = nodes_feature_ids[node_mask]
         branch_value = nodes_values[node_mask]
         if len(branch_value) != 1:
-            raise ValueError(f"The given tree_id and branch_node_id do not uniquely identify a branch value.")
+            raise ValueError(
+                "The given tree_id and branch_node_id do not uniquely identify a branch value."
+            )
         if len(feature_id) != 1:
-            raise ValueError(f"The given tree_id and branch_node_id do not uniquely identify a feature.")
+            raise ValueError(
+                "The given tree_id and branch_node_id do not uniquely identify a feature."
+            )
         feature_id = feature_id[0]
         branch_value = branch_value[0]
         (branch_y_idx,) = np.where(
             branch_value_by_feature_id[feature_id] == branch_value
         )
         if len(branch_y_idx) != 1:
-            raise ValueError(f"The given tree_id and branch_node_id do not uniquely identify a branch index.")
+            raise ValueError(
+                "The given tree_id and branch_node_id do not uniquely identify a branch index."
+            )
         return block.y[feature_id, branch_y_idx[0]]

     def _sum_of_z_l(tree_id, start_node_id):
diff --git a/src/omlt/gbt/model.py b/src/omlt/gbt/model.py
index 574f1fb9..c45d516b 100644
--- a/src/omlt/gbt/model.py
+++ b/src/omlt/gbt/model.py
@@ -57,7 +57,9 @@ def _model_num_inputs(model):
     """Returns the number of input variables"""
     graph = model.graph
     if len(graph.input) != 1:
-        raise ValueError(f"Model graph input field is multi-valued {graph.input}. A single value is required.")
+        raise ValueError(
+            f"Model graph input field is multi-valued {graph.input}. A single value is required."
+        )
     return _tensor_size(graph.input[0])


@@ -65,7 +67,9 @@ def _model_num_outputs(model):
    """Returns the number of output variables"""
    graph = model.graph
    if len(graph.output) != 1:
-        raise ValueError(f"Model graph output field is multi-valued {graph.output}. A single value is required.")
+        raise ValueError(
+            f"Model graph output field is multi-valued {graph.output}. A single value is required."
+        )
     return _tensor_size(graph.output[0])


@@ -73,7 +77,11 @@ def _tensor_size(tensor):
     """Returns the size of an input tensor"""
     tensor_type = tensor.type.tensor_type
     size = None
-    dim_values = [dim.dim_value for dim in tensor_type.shape.dim if dim.dim_value is not None and dim.dim_value > 0]
+    dim_values = [
+        dim.dim_value
+        for dim in tensor_type.shape.dim
+        if dim.dim_value is not None and dim.dim_value > 0
+    ]
     if len(dim_values) == 1:
         size = dim_values[0]
     elif dim_values == []:
diff --git a/src/omlt/io/onnx_parser.py b/src/omlt/io/onnx_parser.py
index cf71f8a3..9e7ab3ba 100644
--- a/src/omlt/io/onnx_parser.py
+++ b/src/omlt/io/onnx_parser.py
@@ -74,14 +74,16 @@ def parse_network(self, graph, scaling_object, input_bounds):
                     size.append(dim.dim_value)
                     dim_value *= dim.dim_value
             if dim_value is None:
-                raise ValueError(f"All dimensions in graph \"{graph.name}\" input tensor have 0 value.")
+                raise ValueError(
+                    f'All dimensions in graph "{graph.name}" input tensor have 0 value.'
+                )
             assert network_input is None
             network_input = InputLayer(size)
             self._node_map[input.name] = network_input
             network.add_layer(network_input)

         if network_input is None:
-            raise ValueError(f"No valid input layer found in graph \"{graph.name}\".")
+            raise ValueError(f'No valid input layer found in graph "{graph.name}".')

         self._nodes = nodes
         self._nodes_by_output = nodes_by_output
@@ -116,7 +118,9 @@ def parse_network(self, graph, scaling_object, input_bounds):
                     value = _parse_constant_value(node)
                     self._constants[output] = value
             else:
-                raise ValueError(f"Nodes must have inputs or have op_type \"Constant\". Node \"{node.name}\" has no inputs and op_type \"{node.op_type}\".")
+                raise ValueError(
+                    f'Nodes must have inputs or have op_type "Constant". Node "{node.name}" has no inputs and op_type "{node.op_type}".'
+                )

         # traverse graph
         self._node_stack = list(inputs)
@@ -173,23 +177,31 @@ def _visit_node(self, node, next_nodes):
     def _consume_dense_nodes(self, node, next_nodes):
         """Starting from a MatMul node, consume nodes to form a dense Ax + b node."""
         if node.op_type != "MatMul":
-            raise ValueError(f"{node.name} is a {node.op_type} node, only MatMul nodes can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} is a {node.op_type} node, only MatMul nodes can be used as starting points for consumption."
+            )
         if len(node.input) != 2:
-            raise ValueError(f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
+            )

         [in_0, in_1] = list(node.input)
         input_layer, transformer = self._node_input_and_transformer(in_0)
         node_weights = self._initializers[in_1]

         if len(next_nodes) != 1:
-            raise ValueError(f"Next nodes must have length 1, {next_nodes} has length {len(next_nodes)}")
+            raise ValueError(
+                f"Next nodes must have length 1, {next_nodes} has length {len(next_nodes)}"
+            )

         # expect 'Add' node ahead
         type_, node, maybe_next_nodes = self._nodes[next_nodes[0]]
         if type_ != "node":
             raise TypeError(f"Expected a node next, got a {type_} instead.")
         if node.op_type != "Add":
-            raise ValueError(f"The first node to be consumed, {node.name}, is a {node.op_type} node. Only Add nodes are supported.")
+            raise ValueError(
+                f"The first node to be consumed, {node.name}, is a {node.op_type} node. Only Add nodes are supported."
+            )

         # extract biases
         next_nodes = maybe_next_nodes
@@ -205,9 +217,13 @@ def _consume_dense_nodes(self, node, next_nodes):
         if len(node_weights.shape) != 2:
             raise ValueError(f"Node weights must be a 2-dimensional matrix.")
         if node_weights.shape[1] != node_biases.shape[0]:
-            raise ValueError(f"Node weights has {node_weights.shape[1]} columns; node biases has {node_biases.shape[0]} rows. These must be equal.")
+            raise ValueError(
+                f"Node weights has {node_weights.shape[1]} columns; node biases has {node_biases.shape[0]} rows. These must be equal."
+            )
         if len(node.output) != 1:
-            raise ValueError(f"Node output is {node.output} but should be a single value.")
+            raise ValueError(
+                f"Node output is {node.output} but should be a single value."
+            )

         input_output_size = _get_input_output_size(input_layer, transformer)

@@ -239,9 +255,13 @@ def _consume_dense_nodes(self, node, next_nodes):
     def _consume_gemm_dense_nodes(self, node, next_nodes):
         """Starting from a Gemm node, consume nodes to form a dense aAB + bC node."""
         if node.op_type != "Gemm":
-            raise ValueError(f"{node.name} is a {node.op_type} node, only Gemm nodes can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} is a {node.op_type} node, only Gemm nodes can be used as starting points for consumption."
+            )
         if len(node.input) != 3:
-            raise ValueError(f"{node.name} input has {len(node.input)} dimensions, only nodes with 3 input dimensions can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} input has {len(node.input)} dimensions, only nodes with 3 input dimensions can be used as starting points for consumption."
+            )

         attr = _collect_attributes(node)
         alpha = attr["alpha"]
@@ -290,9 +310,13 @@ def _consume_conv_nodes(self, node, next_nodes):
         (optional) activation function.
         """
         if node.op_type != "Conv":
-            raise ValueError(f"{node.name} is a {node.op_type} node, only Conv nodes can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} is a {node.op_type} node, only Conv nodes can be used as starting points for consumption."
+            )
         if len(node.input) not in [2,3]:
-            raise ValueError(f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
+            )

         if len(node.input) == 2:
             [in_0, in_1] = list(node.input)
@@ -314,25 +338,41 @@ def _consume_conv_nodes(self, node, next_nodes):
         strides = attr["strides"]
         # check only kernel shape and stride are set
         if attr["kernel_shape"] != kernel_shape:
-            raise ValueError(f"Kernel shape attribute {attr['kernel_shape']} does not match initialized kernel shape {kernel_shape}.")
+            raise ValueError(
+                f"Kernel shape attribute {attr['kernel_shape']} does not match initialized kernel shape {kernel_shape}."
+            )
         if len(kernel_shape) != len(strides):
-            raise ValueError(f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal.")
+            raise ValueError(
+                f"Initialized kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal."
+            )
         if len(input_output_size) != len(kernel_shape) + 1:
-            raise ValueError(f"Input/output size ({input_output_size}) must have one more dimension than initialized kernel shape ({kernel_shape}).")
+            raise ValueError(
+                f"Input/output size ({input_output_size}) must have one more dimension than initialized kernel shape ({kernel_shape})."
+            )

         # Check input, output have correct dimensions
         if biases.shape != (out_channels,):
-            raise ValueError(f"Biases shape {biases.shape} must match output weights channels {(out_channels,)}.")
+            raise ValueError(
+                f"Biases shape {biases.shape} must match output weights channels {(out_channels,)}."
+            )
         if in_channels != input_output_size[0]:
-            raise ValueError(f"Input/output size ({input_output_size}) first dimension must match input weights channels ({in_channels}).")
+            raise ValueError(
+                f"Input/output size ({input_output_size}) first dimension must match input weights channels ({in_channels})."
+            )

         # Other attributes are not supported
         if "dilations" in attr and attr["dilations"] != [1, 1]:
-            raise ValueError(f"{node} has non-identity dilations ({attr['dilations']}). This is not supported.")
+            raise ValueError(
+                f"{node} has non-identity dilations ({attr['dilations']}). This is not supported."
+            )
         if attr["group"] != 1:
-            raise ValueError(f"{node} has multiple groups ({attr['group']}). This is not supported.")
+            raise ValueError(
+                f"{node} has multiple groups ({attr['group']}). This is not supported."
+            )
         if "pads" in attr and np.any(attr["pads"]):
-            raise ValueError(f"{node} has non-zero pads ({attr['pads']}). This is not supported.")
+            raise ValueError(
+                f"{node} has non-zero pads ({attr['pads']}). This is not supported."
+            )

         # generate new nodes for the node output
         padding = 0
@@ -353,7 +393,9 @@ def _consume_conv_nodes(self, node, next_nodes):
         # convolute image one channel at the time
         # expect 2d image with channels
         if len(input_output_size) != 3:
-            raise ValueError(f"Expected a 2D image with channels, got {input_output_size}.")
+            raise ValueError(
+                f"Expected a 2D image with channels, got {input_output_size}."
+            )

         conv_layer = ConvLayer2D(
             input_output_size,
@@ -371,9 +413,13 @@ def _consume_conv_nodes(self, node, next_nodes):
     def _consume_reshape_nodes(self, node, next_nodes):
         """Parse a Reshape node."""
         if node.op_type != "Reshape":
-            raise ValueError(f"{node.name} is a {node.op_type} node, only Reshape nodes can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} is a {node.op_type} node, only Reshape nodes can be used as starting points for consumption."
+            )
         if len(node.input) != 2:
-            raise ValueError(f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} input has {len(node.input)} dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
+            )
         [in_0, in_1] = list(node.input)
         input_layer = self._node_map[in_0]
         new_shape = self._constants[in_1]
@@ -388,14 +434,20 @@ def _consume_pool_nodes(self, node, next_nodes):
         (optional) activation function.
         """
         if node.op_type not in _POOLING_OP_TYPES:
-            raise ValueError(f"{node.name} is a {node.op_type} node, only MaxPool nodes can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} is a {node.op_type} node, only MaxPool nodes can be used as starting points for consumption."
+            )
         pool_func_name = "max"

         # ONNX network should not contain indices output from MaxPool - not supported by OMLT
         if len(node.output) != 1:
-            raise ValueError(f"The ONNX contains indices output from MaxPool. This is not supported by OMLT.")
+            raise ValueError(
+                "The ONNX model contains indices output from MaxPool. This is not supported by OMLT."
+            )
         if len(node.input) != 1:
-            raise ValueError(f"{node.name} input has {len(node.input)} dimensions, only nodes with 1 input dimension can be used as starting points for consumption.")
+            raise ValueError(
+                f"{node.name} input has {len(node.input)} dimensions, only nodes with 1 input dimension can be used as starting points for consumption."
+            )

         input_layer, transformer = self._node_input_and_transformer(node.input[0])
         input_output_size = _get_input_output_size(input_layer, transformer)
@@ -405,7 +457,9 @@ def _consume_pool_nodes(self, node, next_nodes):
         # this means there is an extra dimension for number of batches
         # batches not supported, so only accept if they're not there or there is only 1 batch
         if input_output_size[0] != 1:
-            raise ValueError(f"{node.name} has {input_output_size[0]} batches, only a single batch is supported.")
+            raise ValueError(
+                f"{node.name} has {input_output_size[0]} batches, only a single batch is supported."
+            )
             input_output_size = input_output_size[1:]

         in_channels = input_output_size[0]
@@ -418,15 +472,25 @@ def _consume_pool_nodes(self, node, next_nodes):
         # check only kernel shape, stride, storage order are set
         # everything else is not supported
         if "dilations" in attr and attr["dilations"] != [1, 1]:
-            raise ValueError(f"{node.name} has non-identity dilations ({attr['dilations']}). This is not supported.")
+            raise ValueError(
+                f"{node.name} has non-identity dilations ({attr['dilations']}). This is not supported."
+            )
         if "pads" in attr and np.any(attr["pads"]):
-            raise ValueError(f"{node.name} has non-zero pads ({attr['pads']}). This is not supported.")
+            raise ValueError(
+                f"{node.name} has non-zero pads ({attr['pads']}). This is not supported."
+            )
         if ("auto_pad" in attr) and (attr["auto_pad"] != "NOTSET"):
-            raise ValueError(f"{node.name} has autopad set ({attr['auto_pad']}). This is not supported.")
+            raise ValueError(
+                f"{node.name} has autopad set ({attr['auto_pad']}). This is not supported."
+            )
         if len(kernel_shape) != len(strides):
-            raise ValueError(f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal.")
+            raise ValueError(
+                f"Kernel shape {kernel_shape} has {len(kernel_shape)} dimensions. Strides attribute has {len(strides)} dimensions. These must be equal."
+            )
         if len(input_output_size) != len(kernel_shape) + 1:
-            raise ValueError(f"Input/output size ({input_output_size}) must have one more dimension than kernel shape ({kernel_shape}).")
+            raise ValueError(
+                f"Input/output size ({input_output_size}) must have one more dimension than kernel shape ({kernel_shape})."
+            )

         output_shape_wrapper = math.floor
         if "ceil_mode" in attr and attr["ceil_mode"] == 1:
diff --git a/src/omlt/neuralnet/layer.py b/src/omlt/neuralnet/layer.py
index 45f1acba..15ed022b 100644
--- a/src/omlt/neuralnet/layer.py
+++ b/src/omlt/neuralnet/layer.py
@@ -24,9 +24,13 @@ def __init__(
         self, input_size, output_size, *, activation=None, input_index_mapper=None
     ):
         if not isinstance(input_size, (list,tuple)):
-            raise TypeError(f"input_size must be a list or tuple, {type(input_size)} was provided.")
+            raise TypeError(
+                f"input_size must be a list or tuple, {type(input_size)} was provided."
+            )
         if not isinstance(output_size, (list,tuple)):
-            raise TypeError(f"output_size must be a list or tuple, {type(output_size)} was provided.")
+            raise TypeError(
+                f"output_size must be a list or tuple, {type(output_size)} was provided."
+            )
         self.__input_size = list(input_size)
         self.__output_size = list(output_size)
         self.activation = activation
@@ -102,7 +106,9 @@ def eval_single_layer(self, x):
             else x[:]
         )
         if x_reshaped.shape != tuple(self.input_size):
-            raise ValueError(f"Layer requires an input size {self.input_size}, but the input tensor had size {x_reshaped.shape}.")
+            raise ValueError(
+                f"Layer requires an input size {self.input_size}, but the input tensor had size {x_reshaped.shape}."
+            )
         y = self._eval(x_reshaped)
         return self._apply_activation(y)

@@ -313,7 +319,9 @@ def get_input_index(self, out_index, kernel_index):
     def _eval(self, x):
         y = np.empty(shape=self.output_size)
         if len(self.output_size) != 3:
-            raise ValueError(f"Output should have 3 dimensions but instead has {len(self.output_size)}")
+            raise ValueError(
+                f"Output should have 3 dimensions but instead has {len(self.output_size)}"
+            )
         [depth, rows, cols] = list(self.output_size)
         for out_d in range(depth):
             for out_r in range(rows):
@@ -370,7 +378,9 @@ def __init__(
             input_index_mapper=input_index_mapper,
         )
         if pool_func_name not in PoolingLayer2D._POOL_FUNCTIONS:
-            raise ValueError(f"Allowable pool functions are {PoolingLayer2D._POOL_FUNCTIONS}, {pool_func_name} was provided.")
+            raise ValueError(
+                f"Allowable pool functions are {PoolingLayer2D._POOL_FUNCTIONS}, {pool_func_name} was provided."
+            )
         self._pool_func_name = pool_func_name
         self._kernel_shape = kernel_shape
         self._kernel_depth = kernel_depth
diff --git a/src/omlt/neuralnet/layers/full_space.py b/src/omlt/neuralnet/layers/full_space.py
index 71296bdd..a699a76e 100644
--- a/src/omlt/neuralnet/layers/full_space.py
+++ b/src/omlt/neuralnet/layers/full_space.py
@@ -5,6 +5,7 @@
 from omlt.neuralnet.activations import NON_INCREASING_ACTIVATIONS
 from omlt.neuralnet.layer import ConvLayer2D, IndexMapper, PoolingLayer2D

+
 def full_space_dense_layer(net_block, net, layer_block, layer):
     r"""
     Add full-space formulation of the dense layer to the block
@@ -56,7 +57,9 @@ def full_space_conv2d_layer(net_block, net, layer_block, layer):
     ):
         # activation applied after convolution layer, so there shouldn't be an activation after max pooling too
         if succ_layer.activation != "linear":
-            raise ValueError(f"Activation is applied after convolution layer, but the successor max pooling layer {succ_layer} has an activation function also.")
+            raise ValueError(
+                f"Activation is applied after convolution layer, but the successor max pooling layer {succ_layer} has an activation function also."
+            )
         succ_layer.activation = layer.activation
         layer.activation = "linear"

@@ -114,10 +117,12 @@ def full_space_maxpool2d_layer(net_block, net, layer_block, layer):
     """
     input_layer, input_layer_block = _input_layer_and_block(net_block, net, layer)

-    if not isinstance(input_layer,ConvLayer2D):
+    if not isinstance(input_layer, ConvLayer2D):
         raise TypeError("Input layer must be a ConvLayer2D.")
     if input_layer.activation != "linear":
-        raise ValueError("Non-increasing activation functions on the preceding convolutional layer are not supported.")
+        raise ValueError(
+            "Non-increasing activation functions on the preceding convolutional layer are not supported."
+        )
     # TODO - add support for non-increasing activation functions on preceding convolutional layer

     # note kernel indexes are the same set of values for any output index, so wlog get kernel indexes for (0, 0, 0)
diff --git a/src/omlt/neuralnet/layers/partition_based.py b/src/omlt/neuralnet/layers/partition_based.py
index c0de2053..746196e7 100644
--- a/src/omlt/neuralnet/layers/partition_based.py
+++ b/src/omlt/neuralnet/layers/partition_based.py
@@ -45,7 +45,9 @@ def partition_based_dense_relu_layer(net_block, net, layer_block, layer, split_f
     # not an input layer, process the expressions
     prev_layers = list(net.predecessors(layer))
     if len(prev_layers) == 0:
-        raise ValueError(f"Layer {layer} is not an input layer, but has no predecessors.")
+        raise ValueError(
+            f"Layer {layer} is not an input layer, but has no predecessors."
+        )
     elif len(prev_layers) > 1:
         raise ValueError(f"Layer {layer} has multiple predecessors.")
     prev_layer = prev_layers[0]
diff --git a/src/omlt/neuralnet/layers/reduced_space.py b/src/omlt/neuralnet/layers/reduced_space.py
index 76a56a07..3fb67477 100644
--- a/src/omlt/neuralnet/layers/reduced_space.py
+++ b/src/omlt/neuralnet/layers/reduced_space.py
@@ -12,7 +12,9 @@ def reduced_space_dense_layer(net_block, net, layer_block, layer, activation):
     # not an input layer, process the expressions
     prev_layers = list(net.predecessors(layer))
     if len(prev_layers) == 0:
-        raise ValueError(f"Layer {layer} is not an input layer, but has no predecessors.")
+        raise ValueError(
+            f"Layer {layer} is not an input layer, but has no predecessors."
+        )
     elif len(prev_layers) > 1:
         raise ValueError(f"Layer {layer} has multiple predecessors.")
     prev_layer = prev_layers[0]
diff --git a/tests/io/test_onnx_parser.py b/tests/io/test_onnx_parser.py
index f2af846e..59cce5c9 100644
--- a/tests/io/test_onnx_parser.py
+++ b/tests/io/test_onnx_parser.py
@@ -115,7 +115,7 @@ def test_input_tensor_invalid_dims(datadir):
     parser = NetworkParser()
     with pytest.raises(ValueError) as excinfo:
         parser.parse_network(model.graph, None, None)
-    expected_msg = "All dimensions in graph \"tf2onnx\" input tensor have 0 value."
+    expected_msg = 'All dimensions in graph "tf2onnx" input tensor have 0 value.'
     assert str(excinfo.value) == expected_msg


@@ -126,14 +126,14 @@ def test_no_input_layers(datadir):
     parser = NetworkParser()
     with pytest.raises(ValueError) as excinfo:
         parser.parse_network(model.graph, None, None)
-    expected_msg = "No valid input layer found in graph \"tf2onnx\"."
+    expected_msg = 'No valid input layer found in graph "tf2onnx".'
     assert str(excinfo.value) == expected_msg


 @pytest.mark.skipif(not onnx_available, reason="Need ONNX for this test")
 def test_node_no_inputs(datadir):
     model = onnx.load(datadir.file("keras_linear_131.onnx"))
-    while (len(model.graph.node[0].input) > 0):
+    while len(model.graph.node[0].input) > 0:
         model.graph.node[0].input.pop()
     parser = NetworkParser()
     with pytest.raises(ValueError) as excinfo:
@@ -149,36 +149,36 @@ def test_consume_wrong_node_type(datadir):
     parser.parse_network(model.graph, None, None)

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_dense_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
+        parser._consume_dense_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
     expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MatMul nodes can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_dense

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_gemm_dense_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
+        parser._consume_gemm_dense_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
     expected_msg_gemm = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Gemm nodes can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_gemm

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_conv_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
+        parser._consume_conv_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
     expected_msg_conv = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Conv nodes can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_conv

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_reshape_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
+        parser._consume_reshape_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
     expected_msg_reshape = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only Reshape nodes can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_reshape

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_pool_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
+        parser._consume_pool_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/BiasAdd'][2])
     expected_msg_pool = """StatefulPartitionedCall/keras_linear_131/dense/BiasAdd is a Add node, only MaxPool nodes can be used as starting points for consumption."""
     assert str(excinfo.value) == expected_msg_pool
@@ -192,8 +192,8 @@ def test_consume_dense_wrong_dims(datadir):
     parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][1].input.append('abcd')

     with pytest.raises(ValueError) as excinfo:
-        parser._consume_dense_nodes(parser._nodes[
-            'StatefulPartitionedCall/keras_linear_131/dense/MatMul'][1],
+        parser._consume_dense_nodes(
+            parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][1],
             parser._nodes['StatefulPartitionedCall/keras_linear_131/dense/MatMul'][2])
     expected_msg_dense = "StatefulPartitionedCall/keras_linear_131/dense/MatMul input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_dense
@@ -206,8 +206,9 @@ def test_consume_gemm_wrong_dims(datadir):
     parser.parse_network(model.graph, None, None)
     parser._nodes['Gemm_0'][1].input.append('abcd')
     with pytest.raises(ValueError) as excinfo:
-        parser._consume_gemm_dense_nodes(parser._nodes['Gemm_0'][1],
-                                         parser._nodes['Gemm_0'][2])
+        parser._consume_gemm_dense_nodes(
+            parser._nodes['Gemm_0'][1], parser._nodes['Gemm_0'][2]
+        )
     expected_msg_gemm = "Gemm_0 input has 4 dimensions, only nodes with 3 input dimensions can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_gemm
@@ -219,8 +220,9 @@ def test_consume_conv_wrong_dims(datadir):
     parser.parse_network(model.graph, None, None)
     parser._nodes['Conv_0'][1].input.append('abcd')
     with pytest.raises(ValueError) as excinfo:
-        parser._consume_conv_nodes(parser._nodes['Conv_0'][1],
-                                   parser._nodes['Conv_0'][2])
+        parser._consume_conv_nodes(
+            parser._nodes['Conv_0'][1], parser._nodes['Conv_0'][2]
+        )
     expected_msg_conv = "Conv_0 input has 4 dimensions, only nodes with 2 or 3 input dimensions can be used as starting points for consumption."
     assert str(excinfo.value) == expected_msg_conv


@@ -232,8 +234,9 @@ def test_consume_reshape_wrong_dims(datadir):
     parser.parse_network(model.graph, None, None)
     parser._nodes['Reshape_2'][1].input.append('abcd')
     with pytest.raises(ValueError) as excinfo:
-        parser._consume_reshape_nodes(parser._nodes['Reshape_2'][1],
-                                      parser._nodes['Reshape_2'][2])
+        parser._consume_reshape_nodes(
+            parser._nodes['Reshape_2'][1], parser._nodes['Reshape_2'][2]
+        )
     expected_msg_reshape = """Reshape_2 input has 3 dimensions, only nodes with 2 input dimensions can be used as starting points for consumption."""
     assert str(excinfo.value) == expected_msg_reshape
@@ -245,6 +248,8 @@ def test_consume_maxpool_wrong_dims(datadir):
     parser.parse_network(model.graph, None, None)
     parser._nodes['node1'][1].input.append('abcd')
     with pytest.raises(ValueError) as excinfo:
-        parser._consume_pool_nodes(parser._nodes['node1'][1], parser._nodes['node1'][2])
+        parser._consume_pool_nodes(
+            parser._nodes['node1'][1], parser._nodes['node1'][2]
+        )
     expected_msg_maxpool = """node1 input has 2 dimensions, only nodes with 1 input dimension can be used as starting points for consumption."""
     assert str(excinfo.value) == expected_msg_maxpool
diff --git a/tests/neuralnet/test_network_definition.py b/tests/neuralnet/test_network_definition.py
index 310f3451..dfed1ef3 100644
--- a/tests/neuralnet/test_network_definition.py
+++ b/tests/neuralnet/test_network_definition.py
@@ -133,6 +133,7 @@ def test_input_bound_scaling_multiD():
     )
     assert net.scaled_input_bounds == scaled_input_bounds

+
 def _test_add_invalid_edge(direction):
     """
     direction can be "in" or "out"
@@ -160,17 +161,17 @@ def _test_add_invalid_edge(direction):
         biases=np.array([0.0, 0.0]),
     )

-    if direction == 'in':
+    if direction == "in":
         with pytest.raises(ValueError) as excinfo:
-            net.add_edge(input_layer,dense_layer_1)
+            net.add_edge(input_layer, dense_layer_1)
         expected_msg = f"Inbound layer {dense_layer_1} not found in network."
         assert str(excinfo.value) == expected_msg
-    elif direction == 'out':
+    elif direction == "out":
         with pytest.raises(ValueError) as excinfo:
-            net.add_edge(dense_layer_1,dense_layer_0)
+            net.add_edge(dense_layer_1, dense_layer_0)
         expected_msg = f"Outbound layer {dense_layer_1} not found in network."
         assert str(excinfo.value) == expected_msg

 def test_add_invalid_edge():
-    _test_add_invalid_edge('in')
-    _test_add_invalid_edge('out')
+    _test_add_invalid_edge("in")
+    _test_add_invalid_edge("out")
diff --git a/tests/neuralnet/test_nn_formulation.py b/tests/neuralnet/test_nn_formulation.py
index c856a0c9..d5ce9c37 100644
--- a/tests/neuralnet/test_nn_formulation.py
+++ b/tests/neuralnet/test_nn_formulation.py
@@ -21,11 +21,11 @@
 )
 from omlt.neuralnet.layers.full_space import (
     full_space_maxpool2d_layer,
-    _input_layer_and_block
+    _input_layer_and_block,
 )
 from omlt.neuralnet.layers.partition_based import (
     partition_based_dense_relu_layer,
-    default_partition_split_func
+    default_partition_split_func,
 )
 from omlt.neuralnet.layers.reduced_space import reduced_space_dense_layer

@@ -350,9 +350,9 @@ def _test_formulation_initialize_extra_input(network_formulation):
     extra_input = InputLayer([1])
     net.add_layer(extra_input)
     with pytest.raises(ValueError) as excinfo:
-        if network_formulation == 'FullSpace':
+        if network_formulation == "FullSpace":
             FullSpaceNNFormulation(net)
-        elif network_formulation == 'ReducedSpace':
+        elif network_formulation == "ReducedSpace":
             ReducedSpaceNNFormulation(net)
     expected_msg = "Multiple input layers are not currently supported."
     assert str(excinfo.value) == expected_msg
@@ -367,11 +367,11 @@ def _test_formulation_added_extra_input(network_formulation):
     """
     net, y = two_node_network("linear", -2.0)
     extra_input = InputLayer([1])
-    if network_formulation == 'FullSpace':
+    if network_formulation == "FullSpace":
         formulation = FullSpaceNNFormulation(net)
-    elif network_formulation == 'ReducedSpace':
+    elif network_formulation == "ReducedSpace":
         formulation = ReducedSpaceNNFormulation(net)
-    elif network_formulation == 'relu':
+    elif network_formulation == "relu":
         formulation = ReluPartitionFormulation(net)
     net.add_layer(extra_input)
     with pytest.raises(ValueError) as excinfo:
@@ -389,11 +389,11 @@ def _test_formulation_build_extra_input(network_formulation):
     """
     net, y = two_node_network("linear", -2.0)
     extra_input = InputLayer([1])
-    if network_formulation == 'FullSpace':
+    if network_formulation == "FullSpace":
         formulation = FullSpaceNNFormulation(net)
-    elif network_formulation == 'ReducedSpace':
+    elif network_formulation == "ReducedSpace":
         formulation = ReducedSpaceNNFormulation(net)
-    elif network_formulation == 'relu':
+    elif network_formulation == "relu":
         formulation = ReluPartitionFormulation(net)
     net.add_layer(extra_input)
     m = pyo.ConcreteModel()
@@ -419,11 +419,11 @@ def _test_formulation_added_extra_output(network_formulation):
         weights=np.array([[1.0, 0.0], [5.0, 1.0]]),
         biases=np.array([3.0, 4.0]),
     )
-    if network_formulation == 'FullSpace':
+    if network_formulation == "FullSpace":
         formulation = FullSpaceNNFormulation(net)
-    elif network_formulation == 'ReducedSpace':
+    elif network_formulation == "ReducedSpace":
         formulation = ReducedSpaceNNFormulation(net)
-    elif network_formulation == 'relu':
+    elif network_formulation == "relu":
         formulation = ReluPartitionFormulation(net)
     net.add_layer(extra_output)
     net.add_edge(list(net.layers)[-2], extra_output)
@@ -450,9 +450,9 @@ def _test_formulation_initialize_extra_output(network_formulation):
     net.add_layer(extra_output)
     net.add_edge(list(net.layers)[-2], extra_output)
     with pytest.raises(ValueError) as excinfo:
-        if network_formulation == 'FullSpace':
+        if network_formulation == "FullSpace":
             FullSpaceNNFormulation(net)
-        elif network_formulation == 'ReducedSpace':
+        elif network_formulation == "ReducedSpace":
             ReducedSpaceNNFormulation(net)
     expected_msg = "Multiple output layers are not currently supported."
     assert str(excinfo.value) == expected_msg
@@ -489,9 +489,9 @@ def _test_dense_layer_multiple_predecessors(layer_type):
     net.add_layer(extra_input)
     net.add_edge(extra_input, test_layer)
     with pytest.raises(ValueError) as excinfo:
-        if layer_type == 'PartitionBased':
+        if layer_type == "PartitionBased":
             partition_based_dense_relu_layer(m, net, m, test_layer, None)
-        elif layer_type == 'ReducedSpace':
+        elif layer_type == "ReducedSpace":
             reduced_space_dense_layer(m, net, m, test_layer, None)
     expected_msg = f"Layer {test_layer} has multiple predecessors."
     assert str(excinfo.value) == expected_msg
@@ -513,9 +513,9 @@ def _test_dense_layer_no_predecessors(layer_type):
     )
     net.add_layer(test_layer)
     with pytest.raises(ValueError) as excinfo:
-        if layer_type == 'PartitionBased':
+        if layer_type == "PartitionBased":
             partition_based_dense_relu_layer(m, net, m, test_layer, None)
-        elif layer_type == 'ReducedSpace':
+        elif layer_type == "ReducedSpace":
             reduced_space_dense_layer(m, net, m, test_layer, None)
     expected_msg = f"Layer {test_layer} is not an input layer, but has no predecessors."
     assert str(excinfo.value) == expected_msg
@@ -546,8 +546,9 @@ def test_partition_based_unbounded_below():
     split_func = lambda w: default_partition_split_func(w, 2)

     with pytest.raises(ValueError) as excinfo:
-        partition_based_dense_relu_layer(m.neural_net_block, net,
-            m.neural_net_block, test_layer, split_func)
+        partition_based_dense_relu_layer(
+            m.neural_net_block, net, m.neural_net_block, test_layer, split_func
+        )
     expected_msg = "Expression is unbounded below."
     assert str(excinfo.value) == expected_msg

@@ -567,8 +568,9 @@ def test_partition_based_unbounded_above():
     split_func = lambda w: default_partition_split_func(w, 2)

     with pytest.raises(ValueError) as excinfo:
-        partition_based_dense_relu_layer(m.neural_net_block, net, m.neural_net_block,
-            test_layer, split_func)
+        partition_based_dense_relu_layer(
+            m.neural_net_block, net, m.neural_net_block, test_layer, split_func
+        )
     expected_msg = "Expression is unbounded above."
     assert str(excinfo.value) == expected_msg

@@ -586,8 +588,9 @@ def test_partition_based_bias_unbounded_below():
     split_func = lambda w: default_partition_split_func(w, 2)

     with pytest.raises(ValueError) as excinfo:
-        partition_based_dense_relu_layer(m.neural_net_block, net,
-            m.neural_net_block, test_layer, split_func)
+        partition_based_dense_relu_layer(
+            m.neural_net_block, net, m.neural_net_block, test_layer, split_func
+        )
     expected_msg = "Expression is unbounded below."
     assert str(excinfo.value) == expected_msg

@@ -605,8 +608,9 @@ def test_partition_based_bias_unbounded_above():
     split_func = lambda w: default_partition_split_func(w, 2)

     with pytest.raises(ValueError) as excinfo:
-        partition_based_dense_relu_layer(m.neural_net_block, net, m.neural_net_block,
-            test_layer, split_func)
+        partition_based_dense_relu_layer(
+            m.neural_net_block, net, m.neural_net_block, test_layer, split_func
+        )
     expected_msg = "Expression is unbounded above."
assert str(excinfo.value) == expected_msg @@ -711,19 +715,25 @@ def test_maxpool2d_bad_input_activation(): # test normal ConvLayer -> MaxPoolLayer structure, with monotonic increasing # activation part of ConvLayer maxpool_layer_1 = PoolingLayer2D( - conv_layer_2.output_size, [1, 1, 2], [2, 2], "max", [3, 2], - 1, activation="linear" + conv_layer_2.output_size, + [1, 1, 2], + [2, 2], + "max", + [3, 2], + 1, + activation="linear", ) net.add_layer(maxpool_layer_1) net.add_edge(conv_layer_2, maxpool_layer_1) m.neural_net_block.build_formulation(FullSpaceNNFormulation(net)) - conv_layer_2.activation = 'relu' + conv_layer_2.activation = "relu" with pytest.raises(ValueError) as excinfo: - full_space_maxpool2d_layer(m.neural_net_block, net, m.neural_net_block, - maxpool_layer_1) + full_space_maxpool2d_layer( + m.neural_net_block, net, m.neural_net_block, maxpool_layer_1 + ) expected_msg = """Non-increasing activation functions on the preceding convolutional layer are not supported.""" assert str(excinfo.value) == expected_msg @@ -765,15 +775,25 @@ def test_maxpool2d_bad_input_layer(): # test normal ConvLayer -> MaxPoolLayer structure, with monotonic increasing # activation part of ConvLayer maxpool_layer_1 = PoolingLayer2D( - conv_layer_2.output_size, [1, 1, 2], [2, 2], "max", - [3, 2], 1, activation="linear" + conv_layer_2.output_size, + [1, 1, 2], + [2, 2], + "max", + [3, 2], + 1, + activation="linear", ) net.add_layer(maxpool_layer_1) net.add_edge(conv_layer_2, maxpool_layer_1) maxpool_layer_2 = PoolingLayer2D( - maxpool_layer_1.output_size, [1, 1, 2], [2, 2], "max", - [3, 2], 1, activation="linear" + maxpool_layer_1.output_size, + [1, 1, 2], + [2, 2], + "max", + [3, 2], + 1, + activation="linear", ) net.add_layer(maxpool_layer_2) net.add_edge(maxpool_layer_1, maxpool_layer_2)
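
Illustration (not part of the commit): the reformatted `_tensor_size`
comprehension in src/omlt/gbt/model.py keeps only concrete, positive
dimensions, so a symbolic batch axis (serialized with dim_value == 0) drops
out before the single-value check. A minimal sketch of that filtering,
assuming only the `onnx` package; the tensor name "x" and the shape
[None, 13] are invented for the example:

    from onnx import TensorProto, helper

    # A [None, 13] tensor stores the symbolic batch axis with dim_value == 0
    # (the proto3 default), so the filter keeps only the concrete 13.
    tensor = helper.make_tensor_value_info("x", TensorProto.FLOAT, [None, 13])
    dim_values = [
        dim.dim_value
        for dim in tensor.type.tensor_type.shape.dim
        if dim.dim_value is not None and dim.dim_value > 0
    ]
    assert dim_values == [13]  # _tensor_size would report a size of 13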
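Similarly, the reworked calls in tests/io/test_onnx_parser.py all follow one
pattern: parse the graph, then hand a node entry from `parser._nodes` to a
`_consume_*` helper and assert on the exact error message. A condensed sketch
of that pattern, assuming the keras_linear_131.onnx fixture used throughout
the test file (loaded here from the working directory instead of the
`datadir` fixture, and importing NetworkParser from the module this patch
touches):

    import onnx
    import pytest
    from omlt.io.onnx_parser import NetworkParser

    model = onnx.load("keras_linear_131.onnx")
    parser = NetworkParser()
    parser.parse_network(model.graph, None, None)

    # BiasAdd is an Add node, so feeding it to _consume_dense_nodes must
    # fail: only MatMul nodes are valid starting points for consumption.
    key = "StatefulPartitionedCall/keras_linear_131/dense/BiasAdd"
    with pytest.raises(ValueError) as excinfo:
        parser._consume_dense_nodes(parser._nodes[key][1], parser._nodes[key][2])
    assert "only MatMul nodes" in str(excinfo.value)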