From c0319162ad2163876b9497c319599d3659d1bbda Mon Sep 17 00:00:00 2001
From: Guy Katz
Date: Sun, 26 Apr 2020 08:50:08 +0300
Subject: [PATCH 1/2] Basic bound tightening capabilities for the NLR (#245)

* test

* basic interval arithmetic bound propagation

* another test case

* throw an error for unsupported activation functions
---
 src/engine/MarabouError.h                    |   1 +
 src/engine/NetworkLevelReasoner.cpp          | 197 ++++++++++++++++++-
 src/engine/NetworkLevelReasoner.h            |  36 +++-
 src/engine/tests/Test_NetworkLevelReasoner.h | 172 ++++++++++++----
 src/input_parsers/AcasParser.cpp             |  16 +-
 5 files changed, 379 insertions(+), 43 deletions(-)

diff --git a/src/engine/MarabouError.h b/src/engine/MarabouError.h
index e46fa2600..5de848a15 100644
--- a/src/engine/MarabouError.h
+++ b/src/engine/MarabouError.h
@@ -46,6 +46,7 @@ class MarabouError : public Error
         MERGED_OUTPUT_VARIABLE = 21,
         INVALID_WEIGHTED_SUM_INDEX = 22,
         UNSUCCESSFUL_QUEUE_PUSH = 23,
+        NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED = 24,
 
         // Error codes for Query Loader
         FILE_DOES_NOT_EXIST = 100,
diff --git a/src/engine/NetworkLevelReasoner.cpp b/src/engine/NetworkLevelReasoner.cpp
index aba2bf53a..ab9c5edd8 100644
--- a/src/engine/NetworkLevelReasoner.cpp
+++ b/src/engine/NetworkLevelReasoner.cpp
@@ -14,9 +14,10 @@
 **/
 
 #include "Debug.h"
+#include "FloatUtils.h"
 #include "MStringf.h"
-#include "NetworkLevelReasoner.h"
 #include "MarabouError.h"
+#include "NetworkLevelReasoner.h"
 #include <cstring>
 
 NetworkLevelReasoner::NetworkLevelReasoner()
@@ -24,6 +25,11 @@ NetworkLevelReasoner::NetworkLevelReasoner()
     , _maxLayerSize( 0 )
     , _work1( NULL )
     , _work2( NULL )
+    , _tableau( NULL )
+    , _lowerBoundsWeightedSums( NULL )
+    , _upperBoundsWeightedSums( NULL )
+    , _lowerBoundsActivations( NULL )
+    , _upperBoundsActivations( NULL )
 {
 }
@@ -60,6 +66,66 @@ void NetworkLevelReasoner::freeMemoryIfNeeded()
         delete[] _work2;
         _work2 = NULL;
     }
+
+    if ( _lowerBoundsWeightedSums )
+    {
+        for ( unsigned i = 0; i < _numberOfLayers; ++i )
+        {
+            if ( _lowerBoundsWeightedSums[i] )
+            {
+                delete[] _lowerBoundsWeightedSums[i];
+                _lowerBoundsWeightedSums[i] = NULL;
+            }
+        }
+
+        delete[] _lowerBoundsWeightedSums;
+        _lowerBoundsWeightedSums = NULL;
+    }
+
+    if ( _upperBoundsWeightedSums )
+    {
+        for ( unsigned i = 0; i < _numberOfLayers; ++i )
+        {
+            if ( _upperBoundsWeightedSums[i] )
+            {
+                delete[] _upperBoundsWeightedSums[i];
+                _upperBoundsWeightedSums[i] = NULL;
+            }
+        }
+
+        delete[] _upperBoundsWeightedSums;
+        _upperBoundsWeightedSums = NULL;
+    }
+
+    if ( _lowerBoundsActivations )
+    {
+        for ( unsigned i = 0; i < _numberOfLayers; ++i )
+        {
+            if ( _lowerBoundsActivations[i] )
+            {
+                delete[] _lowerBoundsActivations[i];
+                _lowerBoundsActivations[i] = NULL;
+            }
+        }
+
+        delete[] _lowerBoundsActivations;
+        _lowerBoundsActivations = NULL;
+    }
+
+    if ( _upperBoundsActivations )
+    {
+        for ( unsigned i = 0; i < _numberOfLayers; ++i )
+        {
+            if ( _upperBoundsActivations[i] )
+            {
+                delete[] _upperBoundsActivations[i];
+                _upperBoundsActivations[i] = NULL;
+            }
+        }
+
+        delete[] _upperBoundsActivations;
+        _upperBoundsActivations = NULL;
+    }
 }
 
 void NetworkLevelReasoner::setNumberOfLayers( unsigned numberOfLayers )
@@ -76,7 +142,7 @@ void NetworkLevelReasoner::setLayerSize( unsigned layer, unsigned size )
         _maxLayerSize = size;
 }
 
-void NetworkLevelReasoner::allocateWeightMatrices()
+void NetworkLevelReasoner::allocateMemoryByTopology()
 {
     freeMemoryIfNeeded();
@@ -99,6 +165,27 @@
     _work2 = new double[_maxLayerSize];
     if ( !_work2 )
         throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::work2" );
+
+    _lowerBoundsWeightedSums = new double *[_numberOfLayers];
+    _upperBoundsWeightedSums = new double *[_numberOfLayers];
+    _lowerBoundsActivations = new double *[_numberOfLayers];
+    _upperBoundsActivations = new double *[_numberOfLayers];
+
+    if ( !_lowerBoundsWeightedSums || !_upperBoundsWeightedSums ||
+         !_lowerBoundsActivations || !_upperBoundsActivations )
+        throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::bounds" );
+
+    for ( unsigned i = 0; i < _numberOfLayers; ++i )
+    {
+        _lowerBoundsWeightedSums[i] = new double[_layerSizes[i]];
+        _upperBoundsWeightedSums[i] = new double[_layerSizes[i]];
+        _lowerBoundsActivations[i] = new double[_layerSizes[i]];
+        _upperBoundsActivations[i] = new double[_layerSizes[i]];
+
+        if ( !_lowerBoundsWeightedSums[i] || !_upperBoundsWeightedSums[i] ||
+             !_lowerBoundsActivations[i] || !_upperBoundsActivations[i] )
+            throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::bounds[i]" );
+    }
 }
 
 void NetworkLevelReasoner::setNeuronActivationFunction( unsigned layer, unsigned neuron, ActivationFunction activationFuction )
@@ -155,7 +242,7 @@ void NetworkLevelReasoner::evaluate( double *input, double *output )
                 break;
 
             default:
-                ASSERT( false );
+                throw MarabouError( MarabouError::NETWORK_LEVEL_REASONER_ACTIVATION_NOT_SUPPORTED );
                 break;
             }
         }
@@ -206,7 +293,7 @@ void NetworkLevelReasoner::storeIntoOther( NetworkLevelReasoner &other ) const
     other.setNumberOfLayers( _numberOfLayers );
     for ( const auto &pair : _layerSizes )
         other.setLayerSize( pair.first, pair.second );
-    other.allocateWeightMatrices();
+    other.allocateMemoryByTopology();
 
     for ( const auto &pair : _neuronToActivationFunction )
         other.setNeuronActivationFunction( pair.first._layer, pair.first._neuron, pair.second );
@@ -330,6 +417,108 @@ void NetworkLevelReasoner::updateVariableIndices( const Map<unsigned, unsigned> &oldIndexToNewIndex,
     }
 }
 
+void NetworkLevelReasoner::obtainInputBounds()
+{
+    ASSERT( _tableau );
+
+    for ( unsigned i = 0; i < _layerSizes[0]; ++i )
+    {
+        unsigned varIndex = _indexToActivationResultVariable[Index( 0, i )];
+        _lowerBoundsActivations[0][i] = _tableau->getLowerBound( varIndex );
+        _upperBoundsActivations[0][i] = _tableau->getUpperBound( varIndex );
+    }
+}
+
+void NetworkLevelReasoner::setTableau( const ITableau *tableau )
+{
+    _tableau = tableau;
+}
+
+void NetworkLevelReasoner::intervalArithmeticBoundPropagation()
+{
+    for ( unsigned i = 1; i < _numberOfLayers; ++i )
+    {
+        for ( unsigned j = 0; j < _layerSizes[i]; ++j )
+        {
+            Index index( i, j );
+            double lb = _bias.exists( index ) ? _bias[index] : 0;
+            double ub = _bias.exists( index ) ? _bias[index] : 0;
+
+            for ( unsigned k = 0; k < _layerSizes[i - 1]; ++k )
+            {
+                double previousLb = _lowerBoundsActivations[i - 1][k];
+                double previousUb = _upperBoundsActivations[i - 1][k];
+                double weight = _weights[i - 1][k * _layerSizes[i] + j];
+
+                if ( weight > 0 )
+                {
+                    lb += weight * previousLb;
+                    ub += weight * previousUb;
+                }
+                else
+                {
+                    lb += weight * previousUb;
+                    ub += weight * previousLb;
+                }
+            }
+
+            _lowerBoundsWeightedSums[i][j] = lb;
+            _upperBoundsWeightedSums[i][j] = ub;
+
+            if ( i != _numberOfLayers - 1 )
+            {
+                // Apply activation function
+                ASSERT( _neuronToActivationFunction.exists( index ) );
+
+                switch ( _neuronToActivationFunction[index] )
+                {
+                case ReLU:
+                    _lowerBoundsActivations[i][j] = lb > 0 ? lb : 0;
+                    _upperBoundsActivations[i][j] = ub > 0 ? ub : 0;
+                    break;
+
+                default:
+                    ASSERT( false );
+                    break;
+                }
+            }
+        }
+    }
+}
+
+void NetworkLevelReasoner::getConstraintTightenings( List<Tightening> &tightenings ) const
+{
+    tightenings.clear();
+
+    for ( unsigned i = 1; i < _numberOfLayers; ++i )
+    {
+        for ( unsigned j = 0; j < _layerSizes[i]; ++j )
+        {
+            Index index( i, j );
+
+            // Weighted sums
+            tightenings.append( Tightening( _indexToWeightedSumVariable[index],
+                                            _lowerBoundsWeightedSums[i][j],
+                                            Tightening::LB ) );
+
+            tightenings.append( Tightening( _indexToWeightedSumVariable[index],
+                                            _upperBoundsWeightedSums[i][j],
+                                            Tightening::UB ) );
+
+            // Activation results
+            if ( i != _numberOfLayers - 1 )
+            {
+                tightenings.append( Tightening( _indexToActivationResultVariable[index],
+                                                _lowerBoundsActivations[i][j],
+                                                Tightening::LB ) );
+                tightenings.append( Tightening( _indexToActivationResultVariable[index],
+                                                _upperBoundsActivations[i][j],
+                                                Tightening::UB ) );
+            }
+        }
+    }
+}
+
 //
 // Local Variables:
 // compile-command: "make -C ../.. "
diff --git a/src/engine/NetworkLevelReasoner.h b/src/engine/NetworkLevelReasoner.h
index 2bee0b19f..b06753776 100644
--- a/src/engine/NetworkLevelReasoner.h
+++ b/src/engine/NetworkLevelReasoner.h
@@ -16,7 +16,9 @@
 #ifndef __NetworkLevelReasoner_h__
 #define __NetworkLevelReasoner_h__
 
+#include "ITableau.h"
 #include "Map.h"
+#include "Tightening.h"
 
 /*
   A class for performing operations that require knowledge of network
@@ -68,11 +70,17 @@ class NetworkLevelReasoner
     void setNumberOfLayers( unsigned numberOfLayers );
     void setLayerSize( unsigned layer, unsigned size );
-    void allocateWeightMatrices();
     void setNeuronActivationFunction( unsigned layer, unsigned neuron, ActivationFunction activationFuction );
     void setWeight( unsigned sourceLayer, unsigned sourceNeuron, unsigned targetNeuron, double weight );
     void setBias( unsigned layer, unsigned neuron, double bias );
 
+    /*
+      A method that allocates all internal memory structures, based on
+      the network's topology. Should be invoked after the layer sizes
+      have been provided.
+    */
+    void allocateMemoryByTopology();
+
     /*
       Mapping from node indices to the variables representing their
       weighted sum values and activation result values.
@@ -108,6 +116,22 @@ class NetworkLevelReasoner
     void updateVariableIndices( const Map<unsigned, unsigned> &oldIndexToNewIndex,
                                 const Map<unsigned, unsigned> &mergedVariables );
 
+    /*
+      Bound propagation methods:
+
+        - obtainInputBounds: obtain the current bounds on input variables
+          from the tableau.
+
+        - Interval arithmetic: compute the bounds of a layer's neurons
+          based on the concrete bounds of the previous layer.
+    */
+
+    void setTableau( const ITableau *tableau );
+    void obtainInputBounds();
+    void intervalArithmeticBoundPropagation();
+
+    void getConstraintTightenings( List<Tightening> &tightenings ) const;
+
 private:
     unsigned _numberOfLayers;
     Map<unsigned, unsigned> _layerSizes;
@@ -120,6 +144,8 @@ class NetworkLevelReasoner
     double *_work1;
     double *_work2;
 
+    const ITableau *_tableau;
+
     void freeMemoryIfNeeded();
 
     /*
@@ -133,6 +159,14 @@ class NetworkLevelReasoner
     */
     Map<Index, double> _indexToWeightedSumAssignment;
    Map<Index, double> _indexToActivationResultAssignment;
+
+    /*
+      Work space for bound tightening.
+    */
+    double **_lowerBoundsWeightedSums;
+    double **_upperBoundsWeightedSums;
+    double **_lowerBoundsActivations;
+    double **_upperBoundsActivations;
 };
 
 #endif // __NetworkLevelReasoner_h__
diff --git a/src/engine/tests/Test_NetworkLevelReasoner.h b/src/engine/tests/Test_NetworkLevelReasoner.h
index 10d5cff3f..aca17c2b3 100644
--- a/src/engine/tests/Test_NetworkLevelReasoner.h
+++ b/src/engine/tests/Test_NetworkLevelReasoner.h
@@ -16,7 +16,9 @@
 #include <cxxtest/TestSuite.h>
 
 #include "FloatUtils.h"
+#include "MockTableau.h"
 #include "NetworkLevelReasoner.h"
+#include "Tightening.h"
 
 class MockForNetworkLevelReasoner
 {
@@ -46,7 +48,7 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
                  b     y
            e     g
                  c
-   */
+    */
 
         nlr.setNumberOfLayers( 4 );
 
         nlr.setLayerSize( 0, 2 );
         nlr.setLayerSize( 1, 3 );
         nlr.setLayerSize( 2, 2 );
         nlr.setLayerSize( 3, 2 );
 
-        nlr.allocateWeightMatrices();
+        nlr.allocateMemoryByTopology();
 
         // Weights
         nlr.setWeight( 0, 0, 0, 1 );
@@ -77,51 +79,43 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         // Biases
         nlr.setBias( 1, 0, 1 );
         nlr.setBias( 2, 1, 2 );
-    }
-
-    void test_evaluate()
-    {
-        NetworkLevelReasoner nlr;
-
-        populateNetwork( nlr );
-
-        double input[2];
-        double output[2];
-
-        // Inputs are zeros, only biases count
-        input[0] = 0;
-        input[1] = 0;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
-
-        TS_ASSERT( FloatUtils::areEqual( output[0], 1 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], 4 ) );
-
-        // No ReLUs, case 1
-        input[0] = 1;
-        input[1] = 1;
-
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
+        // Variable indexing
+        nlr.setActivationResultVariable( 0, 0, 0 );
+        nlr.setActivationResultVariable( 0, 1, 1 );
 
-        TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -6 ) );
+        nlr.setWeightedSumVariable( 1, 0, 2 );
+        nlr.setActivationResultVariable( 1, 0, 3 );
+        nlr.setWeightedSumVariable( 1, 1, 4 );
+        nlr.setActivationResultVariable( 1, 1, 5 );
+        nlr.setWeightedSumVariable( 1, 2, 6 );
+        nlr.setActivationResultVariable( 1, 2, 7 );
 
-        // No ReLUs, case 2
-        input[0] = 1;
-        input[1] = 2;
+        nlr.setWeightedSumVariable( 2, 0, 8 );
+        nlr.setActivationResultVariable( 2, 0, 9 );
+        nlr.setWeightedSumVariable( 2, 1, 10 );
+        nlr.setActivationResultVariable( 2, 1, 11 );
 
-        TS_ASSERT_THROWS_NOTHING( nlr.evaluate( input, output ) );
+        nlr.setWeightedSumVariable( 3, 0, 12 );
+        nlr.setWeightedSumVariable( 3, 1, 13 );
 
-        TS_ASSERT( FloatUtils::areEqual( output[0], -4 ) );
-        TS_ASSERT( FloatUtils::areEqual( output[1], -22 ) );
+        // Mark nodes as ReLUs
-
-        // Set all neurons to ReLU, except for input and output neurons
         nlr.setNeuronActivationFunction( 1, 0, NetworkLevelReasoner::ReLU );
         nlr.setNeuronActivationFunction( 1, 1, NetworkLevelReasoner::ReLU );
         nlr.setNeuronActivationFunction( 1, 2, NetworkLevelReasoner::ReLU );
 
         nlr.setNeuronActivationFunction( 2, 0, NetworkLevelReasoner::ReLU );
         nlr.setNeuronActivationFunction( 2, 1, NetworkLevelReasoner::ReLU );
+    }
+
+    void test_evaluate()
+    {
+        NetworkLevelReasoner nlr;
+
+        populateNetwork( nlr );
+
+        double input[2];
+        double output[2];
 
         // With ReLUs, Inputs are zeros, only biases count
         input[0] = 0;
@@ -149,7 +143,6 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( FloatUtils::areEqual( output[0], 0 ) );
         TS_ASSERT( FloatUtils::areEqual( output[1], 0 ) );
-
     }
 
     void test_store_into_other()
     {
@@ -206,6 +199,111 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite
         TS_ASSERT( FloatUtils::areEqual( output1[0], output2[0] ) );
         TS_ASSERT( FloatUtils::areEqual( output1[1], output2[1] ) );
     }
+
+    void test_interval_arithmetic_bound_propagation()
+    {
+        NetworkLevelReasoner nlr;
+        populateNetwork( nlr );
+
+        MockTableau tableau;
+
+        // Initialize the input bounds
+        tableau.setLowerBound( 0, -1 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 1 );
+
+        nlr.setTableau( &tableau );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainInputBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds({
+            Tightening( 2, 0, Tightening::LB ),
+            Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ),
+            Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -5, Tightening::LB ),
+            Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),
+            Tightening( 5, 5, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ),
+            Tightening( 6, 1, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),
+            Tightening( 7, 1, Tightening::UB ),
+
+            Tightening( 8, -1, Tightening::LB ),
+            Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ),
+            Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -1, Tightening::LB ),
+            Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ),
+            Tightening( 11, 7, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ),
+            Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ),
+            Tightening( 13, 28, Tightening::UB ),
+        });
+
+        List<Tightening> bounds;
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_EQUALS( expectedBounds, bounds );
+
+        // Change the input bounds
+        tableau.setLowerBound( 0, -3 );
+        tableau.setUpperBound( 0, 1 );
+        tableau.setLowerBound( 1, -1 );
+        tableau.setUpperBound( 1, 2 );
+
+        // Initialize
+        TS_ASSERT_THROWS_NOTHING( nlr.obtainInputBounds() );
+
+        // Perform the tightening pass
+        TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() );
+
+        List<Tightening> expectedBounds2({
+            Tightening( 2, -2, Tightening::LB ),
+            Tightening( 2, 2, Tightening::UB ),
+            Tightening( 3, 0, Tightening::LB ),
+            Tightening( 3, 2, Tightening::UB ),
+
+            Tightening( 4, -12, Tightening::LB ),
+            Tightening( 4, 5, Tightening::UB ),
+            Tightening( 5, 0, Tightening::LB ),
+            Tightening( 5, 5, Tightening::UB ),
+
+            Tightening( 6, -1, Tightening::LB ),
+            Tightening( 6, 2, Tightening::UB ),
+            Tightening( 7, 0, Tightening::LB ),
+            Tightening( 7, 2, Tightening::UB ),
+
+            Tightening( 8, -2, Tightening::LB ),
+            Tightening( 8, 7, Tightening::UB ),
+            Tightening( 9, 0, Tightening::LB ),
+            Tightening( 9, 7, Tightening::UB ),
+
+            Tightening( 10, -2, Tightening::LB ),
+            Tightening( 10, 7, Tightening::UB ),
+            Tightening( 11, 0, Tightening::LB ),
+            Tightening( 11, 7, Tightening::UB ),
+
+            Tightening( 12, 0, Tightening::LB ),
+            Tightening( 12, 7, Tightening::UB ),
+            Tightening( 13, 0, Tightening::LB ),
+            Tightening( 13, 28, Tightening::UB ),
+        });
+
+        TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) );
+        TS_ASSERT_EQUALS( expectedBounds2, bounds );
+    }
 };
 
 //
diff --git a/src/input_parsers/AcasParser.cpp b/src/input_parsers/AcasParser.cpp
index 1c96e2b1a..613deed68 100644
--- a/src/input_parsers/AcasParser.cpp
+++ b/src/input_parsers/AcasParser.cpp
@@ -178,7 +178,7 @@ void AcasParser::generateQuery( InputQuery &inputQuery )
     for ( unsigned i = 0; i < numberOfLayers; ++i )
         nlr->setLayerSize( i, _acasNeuralNetwork.getLayerSize( i ) );
 
-    nlr->allocateWeightMatrices();
+    nlr->allocateMemoryByTopology();
 
     // Biases
     for ( unsigned i = 1; i < numberOfLayers; ++i )
@@ -220,6 +220,20 @@ void AcasParser::generateQuery( InputQuery &inputQuery )
         }
     }
 
+    // Input variables are treated as "activation results"; output
+    // variables are treated as "weighted sums"
+    for ( unsigned i = 0; i < _acasNeuralNetwork.getLayerSize( 0 ); ++i )
+    {
+        unsigned var = _nodeToF[NodeIndex( 0, i )];
+        nlr->setActivationResultVariable( 0, i, var );
+    }
+
+    for ( unsigned i = 0; i < _acasNeuralNetwork.getLayerSize( numberOfLayers - 1 ); ++i )
+    {
+        unsigned var = _nodeToB[NodeIndex( numberOfLayers - 1, i )];
+        nlr->setWeightedSumVariable( numberOfLayers - 1, i, var );
+    }
+
     // Store the reasoner in the input query
     inputQuery.setNetworkLevelReasoner( nlr );
 

From 98d7e9df267471801ab33b612f09af0df219aa15 Mon Sep 17 00:00:00 2001
From: Guy Katz
Date: Sun, 26 Apr 2020 08:51:25 +0300
Subject: [PATCH 2/2] comments (#250)

---
 src/engine/AbsoluteValueConstraint.h | 4 ++++
 src/engine/MaxConstraint.h           | 6 +++++-
 src/engine/ReluConstraint.h          | 4 ++++
 3 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/src/engine/AbsoluteValueConstraint.h b/src/engine/AbsoluteValueConstraint.h
index a652c0b1d..3cab867fe 100644
--- a/src/engine/AbsoluteValueConstraint.h
+++ b/src/engine/AbsoluteValueConstraint.h
@@ -28,6 +28,10 @@ class AbsoluteValueConstraint : public PiecewiseLinearConstraint
         PHASE_NEGATIVE = 2,
     };
 
+    /*
+      The f variable is the absolute value of the b variable:
+      f = | b |
+    */
     AbsoluteValueConstraint( unsigned b, unsigned f );
 
     /*
diff --git a/src/engine/MaxConstraint.h b/src/engine/MaxConstraint.h
index 0f28bae1a..c76edff77 100644
--- a/src/engine/MaxConstraint.h
+++ b/src/engine/MaxConstraint.h
@@ -24,6 +24,10 @@ class MaxConstraint : public PiecewiseLinearConstraint
 public:
     ~MaxConstraint();
 
+    /*
+      The f variable is the maximum of the variables stored in elements:
+      f = max( elements )
+    */
     MaxConstraint( unsigned f, const Set<unsigned> &elements );
     MaxConstraint( const String &serializedMax );
 
@@ -125,6 +129,7 @@ class MaxConstraint : public PiecewiseLinearConstraint
 private:
     unsigned _f;
     Set<unsigned> _elements;
+
     unsigned _maxIndex;
     bool _maxIndexSet;
     double _maxLowerBound;
@@ -136,7 +141,6 @@ class MaxConstraint : public PiecewiseLinearConstraint
       Returns the phase where variable argMax has maximum value.
     */
     PiecewiseLinearCaseSplit getSplit( unsigned argMax ) const;
-
 };
 
 #endif // __MaxConstraint_h__
diff --git a/src/engine/ReluConstraint.h b/src/engine/ReluConstraint.h
index b6c7bc861..303b162e5 100644
--- a/src/engine/ReluConstraint.h
+++ b/src/engine/ReluConstraint.h
@@ -28,6 +28,10 @@ class ReluConstraint : public PiecewiseLinearConstraint
         PHASE_INACTIVE = 2,
     };
 
+    /*
+      The f variable is the result of applying ReLU to the b variable:
+      f = relu( b )
+    */
     ReluConstraint( unsigned b, unsigned f );
     ReluConstraint( const String &serializedRelu );
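
Note on the propagation rule: the core of intervalArithmeticBoundPropagation() in patch 1 is standard
interval arithmetic for an affine layer followed by a ReLU. The standalone C++ sketch below illustrates
the same rule on a single neuron; the weights, bounds and printed values are illustrative assumptions
only and are not taken from the patch or its tests.

    // A minimal, self-contained sketch of one neuron's bound propagation,
    // using hypothetical weights and source bounds.
    #include <algorithm>
    #include <cstdio>

    int main()
    {
        // Hypothetical source-layer activation bounds: x0 in [-1, 1], x1 in [0, 2]
        double prevLb[2] = { -1, 0 };
        double prevUb[2] = { 1, 2 };

        // Hypothetical weights and bias for a single target neuron
        double weight[2] = { 2, -3 };
        double bias = 1;

        // Affine part: a positive weight maps lb -> lb and ub -> ub;
        // a negative weight swaps the roles of the source bounds
        double lb = bias;
        double ub = bias;
        for ( unsigned k = 0; k < 2; ++k )
        {
            if ( weight[k] > 0 )
            {
                lb += weight[k] * prevLb[k];
                ub += weight[k] * prevUb[k];
            }
            else
            {
                lb += weight[k] * prevUb[k];
                ub += weight[k] * prevLb[k];
            }
        }

        // ReLU part: both bounds are clipped at 0
        double reluLb = std::max( lb, 0.0 );
        double reluUb = std::max( ub, 0.0 );

        // Prints: weighted sum in [-7, 3], relu in [0, 3]
        printf( "weighted sum in [%g, %g], relu in [%g, %g]\n", lb, ub, reluLb, reluUb );
        return 0;
    }

The sign split mirrors the inner loop of the patch: each weight contributes its most pessimistic
combination of source bounds, so the resulting interval is sound even though it may be loose.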
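Note on intended usage: the new unit test exercises the API in a fixed order, which other callers
would presumably follow. A sketch, assuming a NetworkLevelReasoner already populated as in
populateNetwork(), and using the test's MockTableau in place of a real ITableau implementation:

    // Sketch only: an NLR whose topology, weights, biases, variable
    // indices and activation functions have already been configured
    NetworkLevelReasoner nlr;
    // ... setNumberOfLayers / setLayerSize / allocateMemoryByTopology /
    //     setWeight / setBias / set*Variable / setNeuronActivationFunction ...

    MockTableau tableau;            // any ITableau supplies the input bounds
    tableau.setLowerBound( 0, -1 ); // variable 0 is an input variable here
    tableau.setUpperBound( 0, 1 );

    nlr.setTableau( &tableau );               // 1. attach the bound source
    nlr.obtainInputBounds();                  // 2. read current input bounds
    nlr.intervalArithmeticBoundPropagation(); // 3. one propagation pass

    List<Tightening> tightenings;
    nlr.getConstraintTightenings( tightenings ); // 4. export derived bounds

Each Tightening pairs a variable index with a candidate lower or upper bound, which the engine can
then apply to the query; repeating steps 2-4 after the tableau's bounds change yields fresh tightenings.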