diff --git a/maraboupy/MarabouCore.cpp b/maraboupy/MarabouCore.cpp index d09e91eec..1da4bce65 100644 --- a/maraboupy/MarabouCore.cpp +++ b/maraboupy/MarabouCore.cpp @@ -237,7 +237,7 @@ PYBIND11_MODULE(MarabouCore, m) { .def("markInputVariable", &InputQuery::markInputVariable) .def("markOutputVariable", &InputQuery::markOutputVariable) .def("outputVariableByIndex", &InputQuery::outputVariableByIndex) - .def("setSymbolicBoundTightener", &InputQuery::setSymbolicBoundTightener); + .def("setNetworkLevelReasoner", &InputQuery::setNetworkLevelReasoner); py::class_(m, "Options") .def(py::init()) .def_readwrite("_numWorkers", &MarabouOptions::_numWorkers) @@ -248,17 +248,16 @@ PYBIND11_MODULE(MarabouCore, m) { .def_readwrite("_timeoutFactor", &MarabouOptions::_timeoutFactor) .def_readwrite("_verbosity", &MarabouOptions::_verbosity) .def_readwrite("_dnc", &MarabouOptions::_dnc); - py::class_>(m, "SymbolicBoundTightener") + py::class_>(m, "NetworkLevelReasoner") .def(py::init()) - .def("setNumberOfLayers", &SymbolicBoundTightener::setNumberOfLayers) - .def("setLayerSize", &SymbolicBoundTightener::setLayerSize) - .def("allocateWeightAndBiasSpace", &SymbolicBoundTightener::allocateWeightAndBiasSpace) - .def("setBias", &SymbolicBoundTightener::setBias) - .def("setWeight", &SymbolicBoundTightener::setWeight) - .def("setInputLowerBound", &SymbolicBoundTightener::setInputLowerBound) - .def("setInputUpperBound", &SymbolicBoundTightener::setInputUpperBound) - .def("setReluBVariable", &SymbolicBoundTightener::setReluBVariable) - .def("setReluFVariable", &SymbolicBoundTightener::setReluFVariable); + .def("setNumberOfLayers", &NetworkLevelReasoner::setNumberOfLayers) + .def("setLayerSize", &NetworkLevelReasoner::setLayerSize) + .def("setNeuronActivationFunction", &NetworkLevelReasoner::setNeuronActivationFunction) + .def("setBias", &NetworkLevelReasoner::setBias) + .def("setWeight", &NetworkLevelReasoner::setWeight) + .def("allocateMemoryByTopology", &NetworkLevelReasoner::allocateMemoryByTopology) + .def("setWeightedSumVariable", &NetworkLevelReasoner::setWeightedSumVariable) + .def("setActivationResultVariable", &NetworkLevelReasoner::setActivationResultVariable); py::class_ eq(m, "Equation"); eq.def(py::init()); eq.def(py::init()); diff --git a/maraboupy/MarabouNetworkNNet.py b/maraboupy/MarabouNetworkNNet.py index 01b74173b..9d0f7adf4 100644 --- a/maraboupy/MarabouNetworkNNet.py +++ b/maraboupy/MarabouNetworkNNet.py @@ -24,7 +24,7 @@ class MarabouNetworkNNet(MarabouNetwork.MarabouNetwork): """ Class that implements a MarabouNetwork from an NNet file. """ - def __init__ (self, filename, perform_sbt=False): + def __init__ (self, filename, use_nlr=False): """ Constructs a MarabouNetworkNNet object from an .nnet file. @@ -32,10 +32,10 @@ def __init__ (self, filename, perform_sbt=False): filename: path to the .nnet file. Attributes: numLayers (int) The number of layers in the network - layerSizes (list of ints) Layer sizes. + layerSizes (list of ints) Layer sizes. inputSize (int) Size of the input. outputSize (int) Size of the output. - maxLayersize (int) Size of largest layer. + maxLayersize (int) Size of largest layer. inputMinimums (list of floats) Minimum value for each input. inputMaximums (list of floats) Maximum value for each input. inputMeans (list of floats) Mean value for each input. @@ -43,7 +43,7 @@ def __init__ (self, filename, perform_sbt=False): weights (list of list of lists) Outer index corresponds to layer number. biases (list of lists) Outer index corresponds to layer number. 
- sbt The SymbolicBoundTightener object + nlr The NetworkLevelReasoner object """ super().__init__() @@ -83,46 +83,50 @@ def __init__ (self, filename, perform_sbt=False): # Set the number of variables self.numVars = self.numberOfVariables() - if perform_sbt: - self.sbt = self.createSBT(filename) + if use_nlr: + self.nlr = self.createNLR(filename) else: - self.sbt = None + self.nlr = None - def createSBT(self, filename): - sbt = MarabouCore.SymbolicBoundTightener() - sbt.setNumberOfLayers(self.numLayers + 1) + def createNLR(self, filename): + nlr = MarabouCore.NetworkLevelReasoner() + nlr.setNumberOfLayers(self.numLayers + 1) for layer, size in enumerate(self.layerSizes): - sbt.setLayerSize(layer, size) - sbt.allocateWeightAndBiasSpace() + nlr.setLayerSize(layer, size) + nlr.allocateMemoryByTopology() # Biases for layer in range(len(self.biases)): for node in range(len(self.biases[layer])): - sbt.setBias(layer + 1, node, self.biases[layer][node]) + nlr.setBias(layer + 1, node, self.biases[layer][node]) # Weights for layer in range(len(self.weights)): # starting from the first hidden layer for target in range(len(self.weights[layer])): for source in range(len(self.weights[layer][target])): - sbt.setWeight(layer, source, target, self.weights[layer][target][source]) - - # Initial bounds - for i in range(self.inputSize): - sbt.setInputLowerBound( i, self.getInputMinimum(i)) - sbt.setInputUpperBound( i, self.getInputMaximum(i)) - - # Variable indexing of hidden layers + nlr.setWeight(layer, source, target, self.weights[layer][target][source]) + + # Activation functions + RELU = 0 + for layer in range(len(self.weights) - 1): # only hidden layers + for neuron in range(len(self.weights[layer])): + nlr.setNeuronActivationFunction(layer, neuron, RELU) + + # Variable indexing for layer in range(len(self.layerSizes))[1:-1]: for node in range(self.layerSizes[layer]): - sbt.setReluBVariable(layer, node, self.nodeTo_b(layer, node)) - sbt.setReluFVariable(layer, node, self.nodeTo_f(layer, node)) + nlr.setWeightedSumVariable(layer, node, self.nodeTo_b(layer, node)) + nlr.setActivationResultVariable(layer, node, self.nodeTo_f(layer, node)) + + for node in range(self.inputSize): + nlr.setActivationResultVariable(0, node, self.nodeTo_f(0, node)) for node in range(self.outputSize): - sbt.setReluFVariable(len(self.layerSizes) - 1, node, self.nodeTo_b(len(self.layerSizes) - 1, node)) + nlr.setWeightedSumVariable(len(self.layerSizes) - 1, node, self.nodeTo_b(len(self.layerSizes) - 1, node)) - return sbt + return nlr def getMarabouQuery(self): ipq = super(MarabouNetworkNNet, self).getMarabouQuery() - ipq.setNetworkLevelReasoner(self.nlr) return ipq """ @@ -205,18 +209,18 @@ def variableRanges(self): b_variables = [] f_variables = [] output_variables = [] - + input_variables = [i for i in range(self.layerSizes[0])] - + hidden_layers = self.layerSizes[1:-1] - + for layer, hidden_layer_length in enumerate(hidden_layers): for i in range(hidden_layer_length): offset = sum([x*2 for x in hidden_layers[:layer]]) - + b_variables.append(self.layerSizes[0] + offset + i) f_variables.append(self.layerSizes[0] + offset + i+hidden_layer_length) - + #final layer for i in range(self.layerSizes[-1]): offset = sum([x*2 for x in hidden_layers[:len(hidden_layers) - 1]]) @@ -241,10 +245,10 @@ def variableRanges(self): def nodeTo_b(self, layer, node): assert(0 < layer) assert(node < self.layerSizes[layer]) - + offset = self.layerSizes[0] offset += sum([x*2 for x in self.layerSizes[1:layer]]) - 
+ return offset + node """ @@ -261,7 +265,7 @@ def nodeTo_b(self, layer, node): def nodeTo_f(self, layer, node): assert(layer < len(self.layerSizes)) assert(node < self.layerSizes[layer]) - + if layer == 0: return node else: @@ -289,16 +293,16 @@ def buildEquations(self): for node in range(size): #add marabou equation - + equations_aux.append([]) equations_aux[equations_count].append([self.nodeTo_b(layer, node), -1.0]) for previous_node in range(self.layerSizes[layer-1]): equations_aux[equations_count].append([self.nodeTo_f(layer-1, previous_node), self.weights[layer-1][node][previous_node]]) - - + + equations_aux[equations_count].append(-self.biases[layer-1][node]) equations_count += 1 - + return equations_aux """ Identify all relus and their associated variable numbers. @@ -314,7 +318,7 @@ def findRelus(self): for layer, size in enumerate(hidden_layers): for node in range(size): relus.append([self.nodeTo_b(layer+1, node), self.nodeTo_f(layer+1, node)]) - + return relus def numberOfVariables(self): diff --git a/src/configuration/GlobalConfiguration.cpp b/src/configuration/GlobalConfiguration.cpp index d5e4013dd..19c254183 100644 --- a/src/configuration/GlobalConfiguration.cpp +++ b/src/configuration/GlobalConfiguration.cpp @@ -52,7 +52,6 @@ const double GlobalConfiguration::COST_FUNCTION_ERROR_THRESHOLD = 0.0000000001; const bool GlobalConfiguration::USE_HARRIS_RATIO_TEST = true; const bool GlobalConfiguration::USE_SYMBOLIC_BOUND_TIGHTENING = true; -const bool GlobalConfiguration::USE_LINEAR_CONCRETIZATION = true; const double GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT = 0.00000005; const bool GlobalConfiguration::PREPROCESS_INPUT_QUERY = true; @@ -92,6 +91,7 @@ const bool GlobalConfiguration::PROJECTED_STEEPEST_EDGE_LOGGING = false; const bool GlobalConfiguration::GAUSSIAN_ELIMINATION_LOGGING = false; const bool GlobalConfiguration::QUERY_LOADER_LOGGING = false; const bool GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING = false; +const bool GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING = false; const bool GlobalConfiguration::USE_SMART_FIX = false; const bool GlobalConfiguration::USE_LEAST_FIX = false; diff --git a/src/configuration/GlobalConfiguration.h b/src/configuration/GlobalConfiguration.h index 6786c50e4..e06219849 100644 --- a/src/configuration/GlobalConfiguration.h +++ b/src/configuration/GlobalConfiguration.h @@ -164,10 +164,6 @@ class GlobalConfiguration // Whether symbolic bound tightening should be used or not static const bool USE_SYMBOLIC_BOUND_TIGHTENING; - // If symbolic bound tightening is used, should linear concretization (as - // opposed to constant concretization) be used. 
- static const bool USE_LINEAR_CONCRETIZATION; - // Symbolic tightening rounding constant static const double SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; @@ -212,6 +208,7 @@ class GlobalConfiguration static const bool GAUSSIAN_ELIMINATION_LOGGING; static const bool QUERY_LOADER_LOGGING; static const bool SYMBOLIC_BOUND_TIGHTENER_LOGGING; + static const bool NETWORK_LEVEL_REASONER_LOGGING; }; #endif // __GlobalConfiguration_h__ diff --git a/src/engine/CMakeLists.txt b/src/engine/CMakeLists.txt index 46122d992..e9e846521 100644 --- a/src/engine/CMakeLists.txt +++ b/src/engine/CMakeLists.txt @@ -35,7 +35,6 @@ engine_add_unit_test(ProjectedSteepestEdge) engine_add_unit_test(ReluConstraint) engine_add_unit_test(RowBoundTightener) engine_add_unit_test(SmtCore) -engine_add_unit_test(SymbolicBoundTightener) engine_add_unit_test(Tableau) if (${BUILD_PYTHON}) diff --git a/src/engine/Engine.cpp b/src/engine/Engine.cpp index 606643c91..6dc59194f 100644 --- a/src/engine/Engine.cpp +++ b/src/engine/Engine.cpp @@ -30,7 +30,6 @@ Engine::Engine( unsigned verbosity ) : _rowBoundTightener( *_tableau ) - , _symbolicBoundTightener( NULL ) , _smtCore( this ) , _numPlConstraintsDisabledByValidSplits( 0 ) , _preprocessingEnabled( false ) @@ -1046,8 +1045,8 @@ void Engine::initializeNetworkLevelReasoning() { _networkLevelReasoner = _preprocessedQuery.getNetworkLevelReasoner(); - if ( _preprocessedQuery._sbt ) - _symbolicBoundTightener = _preprocessedQuery._sbt; + if ( _networkLevelReasoner ) + _networkLevelReasoner->setTableau( _tableau ); } bool Engine::processInputQuery( InputQuery &inputQuery, bool preprocess ) @@ -1732,71 +1731,39 @@ List Engine::getInputVariables() const void Engine::performSymbolicBoundTightening() { if ( ( !GlobalConfiguration::USE_SYMBOLIC_BOUND_TIGHTENING ) || - ( !_symbolicBoundTightener ) ) + ( !_networkLevelReasoner ) ) return; struct timespec start = TimeUtils::sampleMicro(); unsigned numTightenedBounds = 0; - // Clear any previously stored information - _symbolicBoundTightener->clearReluStatuses(); + // Step 1: tell the NLR about the current bounds + _networkLevelReasoner->obtainCurrentBounds(); - // Step 1: tell the SBT about input bounds; maybe they were tightened - unsigned inputVariableIndex = 0; - for ( const auto &inputVariable : _preprocessedQuery.getInputVariables() ) - { - // We assume the input variables are the first variables - if ( inputVariable != inputVariableIndex ) - { - throw MarabouError( MarabouError::SYMBOLIC_BOUND_TIGHTENER_FAULTY_INPUT, - Stringf( "Sanity check failed, input variable %u with unexpected index %u", inputVariableIndex, inputVariable ).ascii() ); - } - ++inputVariableIndex; - - double min = _tableau->getLowerBound( inputVariable ); - double max = _tableau->getUpperBound( inputVariable ); - - _symbolicBoundTightener->setInputLowerBound( inputVariable, min ); - _symbolicBoundTightener->setInputUpperBound( inputVariable, max ); - } - - // Step 2: tell the SBT about the state of the ReLU constraints - for ( const auto &constraint : _plConstraints ) - { - if ( !constraint->supportsSymbolicBoundTightening() ) - throw MarabouError( MarabouError::SYMBOLIC_BOUND_TIGHTENER_UNSUPPORTED_CONSTRAINT_TYPE ); - - ReluConstraint *relu = (ReluConstraint *)constraint; - unsigned b = relu->getB(); - SymbolicBoundTightener::NodeIndex nodeIndex = _symbolicBoundTightener->nodeIndexFromB( b ); - _symbolicBoundTightener->setReluStatus( nodeIndex._layer, nodeIndex._neuron, relu->getPhaseStatus() ); - } + // Step 2: perform SBT + _networkLevelReasoner->symbolicBoundPropagation(); 
- // Step 3: perfrom the bound tightening - _symbolicBoundTightener->run(); + // Step 3: Extract the bounds + List tightenings; + _networkLevelReasoner->getConstraintTightenings( tightenings ); - // Stpe 4: extract any tighter bounds that were discovered - for ( const auto &pair : _symbolicBoundTightener->getNodeIndexToFMapping() ) + for ( const auto &tightening : tightenings ) { - unsigned layer = pair.first._layer; - unsigned neuron = pair.first._neuron; - unsigned var = pair.second; - - double lb = _symbolicBoundTightener->getLowerBound( layer, neuron ); - double ub = _symbolicBoundTightener->getUpperBound( layer, neuron ); - - double currentLb = _tableau->getLowerBound( var ); - double currentUb = _tableau->getUpperBound( var ); - - _tableau->tightenLowerBound( var, lb ); - _tableau->tightenUpperBound( var, ub ); - - if ( FloatUtils::lt( ub, currentUb ) ) + if ( tightening._type == Tightening::LB && + _tableau->getLowerBound( tightening._variable ) < tightening._value ) + { + _tableau->tightenLowerBound( tightening._variable, tightening._value ); ++numTightenedBounds; + } + - if ( FloatUtils::gt( lb, currentLb ) ) + if ( tightening._type == Tightening::UB && + _tableau->getUpperBound( tightening._variable ) > tightening._value ) + { + _tableau->tightenUpperBound( tightening._variable, tightening._value ); ++numTightenedBounds; + } } struct timespec end = TimeUtils::sampleMicro(); diff --git a/src/engine/Engine.h b/src/engine/Engine.h index c76c78b22..c76f3d1eb 100644 --- a/src/engine/Engine.h +++ b/src/engine/Engine.h @@ -219,11 +219,6 @@ class Engine : public IEngine, public SignalHandler::Signalable */ AutoRowBoundTightener _rowBoundTightener; - /* - Symbolic bound tightnere. - */ - SymbolicBoundTightener *_symbolicBoundTightener; - /* The SMT engine is in charge of case splitting. 
*/ diff --git a/src/engine/InputQuery.cpp b/src/engine/InputQuery.cpp index 70ad73951..3fdef325a 100644 --- a/src/engine/InputQuery.cpp +++ b/src/engine/InputQuery.cpp @@ -22,7 +22,6 @@ InputQuery::InputQuery() : _networkLevelReasoner( NULL ) - , _sbt( NULL ) { } @@ -34,12 +33,6 @@ InputQuery::~InputQuery() delete _networkLevelReasoner; _networkLevelReasoner = NULL; } - - if ( _sbt ) - { - delete _sbt; - _sbt = NULL; - } } void InputQuery::setNumberOfVariables( unsigned numberOfVariables ) @@ -244,27 +237,11 @@ InputQuery &InputQuery::operator=( const InputQuery &other ) } } - if ( other._sbt ) - { - if ( !_sbt ) - _sbt = new SymbolicBoundTightener; - other._sbt->storeIntoOther( *_sbt ); - } - else - { - if ( _sbt ) - { - delete _sbt; - _sbt = NULL; - } - } - return *this; } InputQuery::InputQuery( const InputQuery &other ) : _networkLevelReasoner( NULL ) - , _sbt( NULL ) { *this = other; } @@ -503,11 +480,6 @@ void InputQuery::dump() const } } -void InputQuery::setSymbolicBoundTightener( SymbolicBoundTightener *sbt ) -{ - _sbt = sbt; -} - void InputQuery::adjustInputOutputMapping( const Map &oldIndexToNewIndex, const Map &mergedVariables ) { diff --git a/src/engine/InputQuery.h b/src/engine/InputQuery.h index a2603b980..ae32aa0ad 100644 --- a/src/engine/InputQuery.h +++ b/src/engine/InputQuery.h @@ -22,7 +22,6 @@ #include "Map.h" #include "NetworkLevelReasoner.h" #include "PiecewiseLinearConstraint.h" -#include "SymbolicBoundTightener.h" class InputQuery { @@ -93,11 +92,6 @@ class InputQuery InputQuery &operator=( const InputQuery &other ); InputQuery( const InputQuery &other ); - /* - Attach a symbolic bound tightener to the input query - */ - void setSymbolicBoundTightener( SymbolicBoundTightener *sbt ); - /* Debugging methods */ @@ -163,11 +157,6 @@ class InputQuery evaluation of topology-based bound tightening. */ NetworkLevelReasoner *_networkLevelReasoner; - - /* - Symbolic bound tightener. 
- */ - SymbolicBoundTightener *_sbt; }; #endif // __InputQuery_h__ diff --git a/src/engine/NetworkLevelReasoner.cpp b/src/engine/NetworkLevelReasoner.cpp index ab9c5edd8..38196a176 100644 --- a/src/engine/NetworkLevelReasoner.cpp +++ b/src/engine/NetworkLevelReasoner.cpp @@ -18,11 +18,15 @@ #include "MStringf.h" #include "MarabouError.h" #include "NetworkLevelReasoner.h" +#include "ReluConstraint.h" #include NetworkLevelReasoner::NetworkLevelReasoner() : _weights( NULL ) + , _positiveWeights( NULL ) + , _negativeWeights( NULL ) , _maxLayerSize( 0 ) + , _inputLayerSize( 0 ) , _work1( NULL ) , _work2( NULL ) , _tableau( NULL ) @@ -30,6 +34,14 @@ NetworkLevelReasoner::NetworkLevelReasoner() , _upperBoundsWeightedSums( NULL ) , _lowerBoundsActivations( NULL ) , _upperBoundsActivations( NULL ) + , _currentLayerLowerBounds( NULL ) + , _currentLayerUpperBounds( NULL ) + , _currentLayerLowerBias( NULL ) + , _currentLayerUpperBias( NULL ) + , _previousLayerLowerBounds( NULL ) + , _previousLayerUpperBounds( NULL ) + , _previousLayerLowerBias( NULL ) + , _previousLayerUpperBias( NULL ) { } @@ -55,6 +67,36 @@ void NetworkLevelReasoner::freeMemoryIfNeeded() _weights = NULL; } + if ( _positiveWeights ) + { + for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) + { + if ( _positiveWeights[i] ) + { + delete[] _positiveWeights[i]; + _positiveWeights[i] = NULL; + } + } + + delete[] _positiveWeights; + _positiveWeights = NULL; + } + + if ( _negativeWeights ) + { + for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) + { + if ( _negativeWeights[i] ) + { + delete[] _negativeWeights[i]; + _negativeWeights[i] = NULL; + } + } + + delete[] _negativeWeights; + _negativeWeights = NULL; + } + if ( _work1 ) { delete[] _work1; @@ -126,6 +168,54 @@ void NetworkLevelReasoner::freeMemoryIfNeeded() delete[] _upperBoundsActivations; _upperBoundsActivations = NULL; } + + if ( _currentLayerLowerBounds ) + { + delete[] _currentLayerLowerBounds; + _currentLayerLowerBounds = NULL; + } + + if ( _currentLayerUpperBounds ) + { + delete[] _currentLayerUpperBounds; + _currentLayerUpperBounds = NULL; + } + + if ( _currentLayerLowerBias ) + { + delete[] _currentLayerLowerBias; + _currentLayerLowerBias = NULL; + } + + if ( _currentLayerUpperBias ) + { + delete[] _currentLayerUpperBias; + _currentLayerUpperBias = NULL; + } + + if ( _previousLayerLowerBounds ) + { + delete[] _previousLayerLowerBounds; + _previousLayerLowerBounds = NULL; + } + + if ( _previousLayerUpperBounds ) + { + delete[] _previousLayerUpperBounds; + _previousLayerUpperBounds = NULL; + } + + if ( _previousLayerLowerBias ) + { + delete[] _previousLayerLowerBias; + _previousLayerLowerBias = NULL; + } + + if ( _previousLayerUpperBias ) + { + delete[] _previousLayerUpperBias; + _previousLayerUpperBias = NULL; + } } void NetworkLevelReasoner::setNumberOfLayers( unsigned numberOfLayers ) @@ -140,13 +230,16 @@ void NetworkLevelReasoner::setLayerSize( unsigned layer, unsigned size ) if ( size > _maxLayerSize ) _maxLayerSize = size; + + if ( layer == 0 ) + _inputLayerSize = size; } void NetworkLevelReasoner::allocateMemoryByTopology() { freeMemoryIfNeeded(); - _weights = new double*[_numberOfLayers - 1]; + _weights = new double *[_numberOfLayers - 1]; if ( !_weights ) throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::weights" ); @@ -158,6 +251,30 @@ void NetworkLevelReasoner::allocateMemoryByTopology() std::fill_n( _weights[i], _layerSizes[i] * _layerSizes[i+1], 0 ); } + _positiveWeights = new double *[_numberOfLayers - 1]; + if ( !_positiveWeights ) + 
throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::positiveWeights" ); + + for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) + { + _positiveWeights[i] = new double[_layerSizes[i] * _layerSizes[i+1]]; + if ( !_positiveWeights[i] ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::positiveWeights[i]" ); + std::fill_n( _positiveWeights[i], _layerSizes[i] * _layerSizes[i+1], 0 ); + } + + _negativeWeights = new double *[_numberOfLayers - 1]; + if ( !_negativeWeights ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::negativeWeights" ); + + for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) + { + _negativeWeights[i] = new double[_layerSizes[i] * _layerSizes[i+1]]; + if ( !_negativeWeights[i] ) + throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::negativeWeights[i]" ); + std::fill_n( _negativeWeights[i], _layerSizes[i] * _layerSizes[i+1], 0 ); + } + _work1 = new double[_maxLayerSize]; if ( !_work1 ) throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::work1" ); @@ -186,6 +303,16 @@ void NetworkLevelReasoner::allocateMemoryByTopology() !_lowerBoundsActivations[i] || !_lowerBoundsActivations[i] ) throw MarabouError( MarabouError::ALLOCATION_FAILED, "NetworkLevelReasoner::bounds[i]" ); } + + _currentLayerLowerBounds = new double[_maxLayerSize * _layerSizes[0]]; + _currentLayerUpperBounds = new double[_maxLayerSize * _layerSizes[0]]; + _currentLayerLowerBias = new double[_maxLayerSize]; + _currentLayerUpperBias = new double[_maxLayerSize]; + + _previousLayerLowerBounds = new double[_maxLayerSize * _layerSizes[0]]; + _previousLayerUpperBounds = new double[_maxLayerSize * _layerSizes[0]]; + _previousLayerLowerBias = new double[_maxLayerSize]; + _previousLayerUpperBias = new double[_maxLayerSize]; } void NetworkLevelReasoner::setNeuronActivationFunction( unsigned layer, unsigned neuron, ActivationFunction activationFuction ) @@ -199,6 +326,11 @@ void NetworkLevelReasoner::setWeight( unsigned sourceLayer, unsigned sourceNeuro unsigned targetLayerSize = _layerSizes[sourceLayer + 1]; _weights[sourceLayer][sourceNeuron * targetLayerSize + targetNeuron] = weight; + + if ( weight > 0 ) + _positiveWeights[sourceLayer][sourceNeuron * targetLayerSize + targetNeuron] = weight; + else + _negativeWeights[sourceLayer][sourceNeuron * targetLayerSize + targetNeuron] = weight; } void NetworkLevelReasoner::setBias( unsigned layer, unsigned neuron, double bias ) @@ -261,6 +393,7 @@ void NetworkLevelReasoner::evaluate( double *input, double *output ) void NetworkLevelReasoner::setWeightedSumVariable( unsigned layer, unsigned neuron, unsigned variable ) { _indexToWeightedSumVariable[Index( layer, neuron )] = variable; + _weightedSumVariableToIndex[variable] = Index( layer, neuron ); } unsigned NetworkLevelReasoner::getWeightedSumVariable( unsigned layer, unsigned neuron ) const @@ -275,6 +408,7 @@ unsigned NetworkLevelReasoner::getWeightedSumVariable( unsigned layer, unsigned void NetworkLevelReasoner::setActivationResultVariable( unsigned layer, unsigned neuron, unsigned variable ) { _indexToActivationResultVariable[Index( layer, neuron )] = variable; + _activationResultVariableToIndex[variable] = Index( layer, neuron ); } unsigned NetworkLevelReasoner::getActivationResultVariable( unsigned layer, unsigned neuron ) const @@ -299,15 +433,23 @@ void NetworkLevelReasoner::storeIntoOther( NetworkLevelReasoner &other ) const other.setNeuronActivationFunction( pair.first._layer, 
pair.first._neuron, pair.second ); for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) + { memcpy( other._weights[i], _weights[i], sizeof(double) * _layerSizes[i] * _layerSizes[i+1] ); + memcpy( other._positiveWeights[i], _positiveWeights[i], sizeof(double) * _layerSizes[i] * _layerSizes[i+1] ); + memcpy( other._negativeWeights[i], _negativeWeights[i], sizeof(double) * _layerSizes[i] * _layerSizes[i+1] ); + } for ( const auto &pair : _bias ) other.setBias( pair.first._layer, pair.first._neuron, pair.second ); other._indexToWeightedSumVariable = _indexToWeightedSumVariable; other._indexToActivationResultVariable = _indexToActivationResultVariable; + other._weightedSumVariableToIndex = _weightedSumVariableToIndex; + other._activationResultVariableToIndex = _activationResultVariableToIndex; other._indexToWeightedSumAssignment = _indexToWeightedSumAssignment; other._indexToActivationResultAssignment = _indexToActivationResultAssignment; + other._eliminatedWeightedSumVariables = _eliminatedWeightedSumVariables; + other._eliminatedActivationResultVariables = _eliminatedActivationResultVariables; } const Map &NetworkLevelReasoner::getIndexToWeightedSumVariable() @@ -417,15 +559,46 @@ void NetworkLevelReasoner::updateVariableIndices( const Map } } -void NetworkLevelReasoner::obtainInputBounds() +void NetworkLevelReasoner::obtainCurrentBounds() { ASSERT( _tableau ); - for ( unsigned i = 0; i < _layerSizes[0]; ++i ) + for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) { - unsigned varIndex = _indexToActivationResultVariable[Index( 0, i )]; - _lowerBoundsActivations[0][i] = _tableau->getLowerBound( varIndex ); - _upperBoundsActivations[0][i] = _tableau->getUpperBound( varIndex ); + for ( unsigned j = 0; j < _layerSizes[i]; ++j ) + { + if ( _indexToActivationResultVariable.exists( Index( i, j ) ) ) + { + unsigned varIndex = _indexToActivationResultVariable[Index( i, j )]; + _lowerBoundsActivations[i][j] = _tableau->getLowerBound( varIndex ); + _upperBoundsActivations[i][j] = _tableau->getUpperBound( varIndex ); + } + else + { + ASSERT( _eliminatedActivationResultVariables.exists( Index( i, j ) ) ); + _lowerBoundsActivations[i][j] = _eliminatedActivationResultVariables[Index( i, j )]; + _upperBoundsActivations[i][j] = _eliminatedActivationResultVariables[Index( i, j )]; + } + } + } + + for ( unsigned i = 1; i < _numberOfLayers; ++i ) + { + for ( unsigned j = 0; j < _layerSizes[i]; ++j ) + { + if ( _indexToWeightedSumVariable.exists( Index( i, j ) ) ) + { + unsigned varIndex = _indexToWeightedSumVariable[Index( i, j )]; + _lowerBoundsWeightedSums[i][j] = _tableau->getLowerBound( varIndex ); + _upperBoundsWeightedSums[i][j] = _tableau->getUpperBound( varIndex ); + } + else + { + ASSERT( _eliminatedWeightedSumVariables.exists( Index( i, j ) ) ); + _lowerBoundsWeightedSums[i][j] = _eliminatedWeightedSumVariables[Index( i, j )]; + _upperBoundsWeightedSums[i][j] = _eliminatedWeightedSumVariables[Index( i, j )]; + } + } } } @@ -486,6 +659,284 @@ void NetworkLevelReasoner::intervalArithmeticBoundPropagation() } } +void NetworkLevelReasoner::symbolicBoundPropagation() +{ + /* + Initialize the symbolic bounds for the first layer. Each + variable has symbolic upper and lower bound 1 for itself, 0 for + all other variables. The input layer has no biases.
+ */ + + std::fill_n( _previousLayerLowerBounds, _maxLayerSize * _inputLayerSize, 0 ); + std::fill_n( _previousLayerUpperBounds, _maxLayerSize * _inputLayerSize, 0 ); + for ( unsigned i = 0; i < _inputLayerSize; ++i ) + { + _previousLayerLowerBounds[i * _inputLayerSize + i] = 1; + _previousLayerUpperBounds[i * _inputLayerSize + i] = 1; + } + std::fill_n( _previousLayerLowerBias, _maxLayerSize, 0 ); + std::fill_n( _previousLayerUpperBias, _maxLayerSize, 0 ); + + for ( unsigned currentLayer = 1; currentLayer < _numberOfLayers; ++currentLayer ) + { + unsigned currentLayerSize = _layerSizes[currentLayer]; + unsigned previousLayerSize = _layerSizes[currentLayer - 1]; + + /* + Computing symbolic bounds for layer i, based on layer i-1. + We assume that the bounds for the previous layer have been + computed. + */ + std::fill_n( _currentLayerLowerBounds, _maxLayerSize * _inputLayerSize, 0 ); + std::fill_n( _currentLayerUpperBounds, _maxLayerSize * _inputLayerSize, 0 ); + + /* + Perform the multiplication. + + newUB = oldUB * posWeights + oldLB * negWeights + newLB = oldUB * negWeights + oldLB * posWeights + + dimensions for oldUB and oldLB: inputLayerSize x previousLayerSize + dimensions for posWeights and negWeights: previousLayerSize x layerSize + + newUB, newLB dimensions: inputLayerSize x layerSize + */ + + for ( unsigned i = 0; i < _inputLayerSize; ++i ) + { + for ( unsigned j = 0; j < currentLayerSize; ++j ) + { + for ( unsigned k = 0; k < previousLayerSize; ++k ) + { + _currentLayerLowerBounds[i * currentLayerSize + j] += + _previousLayerUpperBounds[i * previousLayerSize + k] * + _negativeWeights[currentLayer - 1][k * currentLayerSize + j]; + + _currentLayerLowerBounds[i * currentLayerSize + j] += + _previousLayerLowerBounds[i * previousLayerSize + k] * + _positiveWeights[currentLayer - 1][k * currentLayerSize + j]; + + _currentLayerUpperBounds[i * currentLayerSize + j] += + _previousLayerUpperBounds[i * previousLayerSize + k] * + _positiveWeights[currentLayer - 1][k * currentLayerSize + j]; + + _currentLayerUpperBounds[i * currentLayerSize + j] += + _previousLayerLowerBounds[i * previousLayerSize + k] * + _negativeWeights[currentLayer - 1][k * currentLayerSize + j]; + } + } + } + + /* + Compute the biases for the new layer + */ + for ( unsigned j = 0; j < currentLayerSize; ++j ) + { + _currentLayerLowerBias[j] = _bias[Index( currentLayer, j )]; + _currentLayerUpperBias[j] = _bias[Index( currentLayer, j )]; + + // Add the weighted bias from the previous layer + for ( unsigned k = 0; k < previousLayerSize; ++k ) + { + double weight = _weights[currentLayer - 1][k * currentLayerSize + j]; + + if ( weight > 0 ) + { + _currentLayerLowerBias[j] += _previousLayerLowerBias[k] * weight; + _currentLayerUpperBias[j] += _previousLayerUpperBias[k] * weight; + } + else + { + _currentLayerLowerBias[j] += _previousLayerUpperBias[k] * weight; + _currentLayerUpperBias[j] += _previousLayerLowerBias[k] * weight; + } + } + } + + /* + We now have the symbolic representation for the new + layer. Next, we compute new lower and upper bounds for + it. For each of these bounds, we compute an upper bound and + a lower bound. + + newUB, newLB dimensions: inputLayerSize x layerSize + */ + for ( unsigned i = 0; i < currentLayerSize; ++i ) + { + /* + lbLb: the lower bound for the expression of the lower bound + lbUb: the upper bound for the expression of the lower bound + etc. 
+ */ + + double lbLb = 0; + double lbUb = 0; + double ubLb = 0; + double ubUb = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + double entry = _currentLayerLowerBounds[j * currentLayerSize + i]; + + if ( entry >= 0 ) + { + lbLb += ( entry * _lowerBoundsActivations[0][j] ); + lbUb += ( entry * _upperBoundsActivations[0][j] ); + } + else + { + lbLb += ( entry * _upperBoundsActivations[0][j] ); + lbUb += ( entry * _lowerBoundsActivations[0][j] ); + } + + entry = _currentLayerUpperBounds[j * currentLayerSize + i]; + + if ( entry >= 0 ) + { + ubLb += ( entry * _lowerBoundsActivations[0][j] ); + ubUb += ( entry * _upperBoundsActivations[0][j] ); + } + else + { + ubLb += ( entry * _upperBoundsActivations[0][j] ); + ubUb += ( entry * _lowerBoundsActivations[0][j] ); + } + } + + // Add the network bias to all bounds + lbLb += _currentLayerLowerBias[i]; + lbUb += _currentLayerLowerBias[i]; + ubLb += _currentLayerUpperBias[i]; + ubUb += _currentLayerUpperBias[i]; + + /* + We now have the tightest bounds we can for the weighted + sum variable. If they are tigheter than what was + previously known, store them. + */ + if ( _lowerBoundsWeightedSums[currentLayer][i] < lbLb ) + _lowerBoundsWeightedSums[currentLayer][i] = lbLb; + if ( _upperBoundsWeightedSums[currentLayer][i] > ubUb ) + _upperBoundsWeightedSums[currentLayer][i] = ubUb; + + /* + Next, we see what can be deduced regarding the bounds of + the activation result + */ + if ( currentLayer < _numberOfLayers - 1 ) + { + /* + There are two ways we can determine that a ReLU has become fixed: + + 1. If the ReLU's variables have been externally fixed + 2. lbLb >= 0 (ACTIVE) or ubUb <= 0 (INACTIVE) + */ + + ReluConstraint::PhaseStatus reluPhase = ReluConstraint::PHASE_NOT_FIXED; + if ( _eliminatedWeightedSumVariables.exists( Index( currentLayer, i ) ) ) + { + if ( _eliminatedWeightedSumVariables[Index( currentLayer, i )] <= 0 ) + reluPhase = ReluConstraint::PHASE_INACTIVE; + else + reluPhase = ReluConstraint::PHASE_ACTIVE; + } + else if ( _eliminatedActivationResultVariables.exists( Index( currentLayer, i ) ) ) + { + if ( FloatUtils::isZero( _eliminatedWeightedSumVariables[Index( currentLayer, i )] ) ) + reluPhase = ReluConstraint::PHASE_INACTIVE; + else + reluPhase = ReluConstraint::PHASE_ACTIVE; + } + else if ( _lowerBoundsWeightedSums[currentLayer][i] >= 0 ) + reluPhase = ReluConstraint::PHASE_ACTIVE; + else if ( _upperBoundsWeightedSums[currentLayer][i] <= 0 ) + reluPhase = ReluConstraint::PHASE_INACTIVE; + + /* + If the ReLU phase is not fixed yet, see whether the + newly computed bounds imply that it should be fixed. + */ + if ( reluPhase == ReluConstraint::PHASE_NOT_FIXED ) + { + // If we got here, we know that lbLb < 0 and ubUb + // > 0 There are four possible cases, depending on + // whether ubLb and lbUb are negative or positive + // (see Neurify paper, page 14). 
+ + // Upper bound + if ( ubLb <= 0 ) + { + // ubLb < 0 < ubUb + // Concretize the upper bound using the Ehler's-like approximation + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _currentLayerUpperBounds[j * currentLayerSize + i] = + _currentLayerUpperBounds[j * currentLayerSize + i] * ubUb / ( ubUb - ubLb ); + + // Do the same for the bias, and then adjust + _currentLayerUpperBias[i] = _currentLayerUpperBias[i] * ubUb / ( ubUb - ubLb ); + _currentLayerUpperBias[i] -= ubLb * ubUb / ( ubUb - ubLb ); + } + + // Lower bound + if ( lbUb <= 0 ) + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _currentLayerLowerBounds[j * currentLayerSize + i] = 0; + + _currentLayerLowerBias[i] = 0; + } + else + { + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + _currentLayerLowerBounds[j * currentLayerSize + i] = + _currentLayerLowerBounds[j * currentLayerSize + i] * lbUb / ( lbUb - lbLb ); + + _currentLayerLowerBias[i] = _currentLayerLowerBias[i] * lbUb / ( lbUb - lbLb ); + } + + lbLb = 0; + } + else + { + // The phase of this ReLU is fixed! + if ( reluPhase == ReluConstraint::PHASE_ACTIVE ) + { + // Active ReLU, bounds are propagated as is + } + else + { + // Inactive ReLU, returns zero + lbLb = 0; + lbUb = 0; + ubLb = 0; + ubUb = 0; + + for ( unsigned j = 0; j < _inputLayerSize; ++j ) + { + _currentLayerLowerBounds[j * currentLayerSize + i] = 0; + _currentLayerUpperBounds[j * currentLayerSize + i] = 0; + } + _currentLayerLowerBias[i] = 0; + _currentLayerUpperBias[i] = 0; + } + } + } + + // Store the bounds for this neuron + if ( _lowerBoundsActivations[currentLayer][i] < lbLb ) + _lowerBoundsActivations[currentLayer][i] = lbLb; + if ( _upperBoundsActivations[currentLayer][i] > ubUb ) + _upperBoundsActivations[currentLayer][i] = ubUb; + } + + // Prepare for next iteration + memcpy( _previousLayerLowerBounds, _currentLayerLowerBounds, sizeof(double) * _maxLayerSize * _inputLayerSize ); + memcpy( _previousLayerUpperBounds, _currentLayerUpperBounds, sizeof(double) * _maxLayerSize * _inputLayerSize ); + memcpy( _previousLayerLowerBias, _currentLayerLowerBias, sizeof(double) * _maxLayerSize ); + memcpy( _previousLayerUpperBias, _currentLayerUpperBias, sizeof(double) * _maxLayerSize ); + } +} + void NetworkLevelReasoner::getConstraintTightenings( List &tightenings ) const { tightenings.clear(); @@ -497,28 +948,63 @@ void NetworkLevelReasoner::getConstraintTightenings( List &tightenin Index index( i, j ); // Weighted sums - tightenings.append( Tightening( _indexToWeightedSumVariable[index], - _lowerBoundsWeightedSums[i][j], - Tightening::LB ) ); + if ( _indexToWeightedSumVariable.exists( index ) ) + { + tightenings.append( Tightening( _indexToWeightedSumVariable[index], + _lowerBoundsWeightedSums[i][j], + Tightening::LB ) ); - tightenings.append( Tightening( _indexToWeightedSumVariable[index], - _upperBoundsWeightedSums[i][j], - Tightening::UB ) ); + tightenings.append( Tightening( _indexToWeightedSumVariable[index], + _upperBoundsWeightedSums[i][j], + Tightening::UB ) ); + } // Activation results if ( i != _numberOfLayers - 1 ) { - tightenings.append( Tightening( _indexToActivationResultVariable[index], - _lowerBoundsActivations[i][j], - Tightening::LB ) ); - tightenings.append( Tightening( _indexToActivationResultVariable[index], - _upperBoundsActivations[i][j], - Tightening::UB ) ); + if ( _indexToActivationResultVariable.exists( index ) ) + { + tightenings.append( Tightening( _indexToActivationResultVariable[index], + _lowerBoundsActivations[i][j], + Tightening::LB ) ); + tightenings.append( 
Tightening( _indexToActivationResultVariable[index], + _upperBoundsActivations[i][j], + Tightening::UB ) ); + } } } } } +void NetworkLevelReasoner::eliminateVariable( unsigned variable, double value ) +{ + if ( _weightedSumVariableToIndex.exists( variable ) ) + { + Index index = _weightedSumVariableToIndex[variable]; + + _indexToWeightedSumVariable.erase( index ); + _weightedSumVariableToIndex.erase( variable ); + + _eliminatedWeightedSumVariables[index] = value; + } + + if ( _activationResultVariableToIndex.exists( variable ) ) + { + Index index = _activationResultVariableToIndex[variable]; + + _indexToActivationResultVariable.erase( index ); + _activationResultVariableToIndex.erase( variable ); + + _eliminatedActivationResultVariables[index] = value; + } +} + +void NetworkLevelReasoner::log( const String &message ) +{ + if ( GlobalConfiguration::NETWORK_LEVEL_REASONER_LOGGING ) + printf( "%s", message.ascii() ); +} + // // Local Variables: // compile-command: "make -C ../.. " diff --git a/src/engine/NetworkLevelReasoner.h b/src/engine/NetworkLevelReasoner.h index b06753776..291c78690 100644 --- a/src/engine/NetworkLevelReasoner.h +++ b/src/engine/NetworkLevelReasoner.h @@ -65,7 +65,7 @@ class NetworkLevelReasoner functions, weights and biases, etc. */ enum ActivationFunction { - ReLU, + ReLU = 0, }; void setNumberOfLayers( unsigned numberOfLayers ); @@ -110,25 +110,39 @@ class NetworkLevelReasoner void storeIntoOther( NetworkLevelReasoner &other ) const; /* - Methods that are typically invoked by the preprocessor, - to inform us of changes in variable indices + Methods that are typically invoked by the preprocessor, to + inform us of changes in variable indices or if a variable has + been eliminated */ + void eliminateVariable( unsigned variable, double value ); void updateVariableIndices( const Map &oldIndexToNewIndex, const Map &mergedVariables ); /* Bound propagation methods: - - obtainInputBounds: obtain the current bounds on input variables + - obtainCurrentBounds: obtain the current bounds on all variables from the tableau. - Interval arithmetic: compute the bounds of a layer's neurons based on the concrete bounds of the previous layer. + + - Symbolic: for each neuron in the network, we compute lower + and upper bounds on the lower and upper bounds of the + neuron. These bounds are expressed as linear combinations of + the input neurons. Sometimes these bounds let us simplify + expressions and obtain tighter bounds (e.g., if the upper + bound on the upper bound of a ReLU node is negative, that + ReLU is inactive and its output can be set to 0). + + Initialize should be called once, before the bound + propagation is performed.
*/ void setTableau( const ITableau *tableau ); - void obtainInputBounds(); + void obtainCurrentBounds(); void intervalArithmeticBoundPropagation(); + void symbolicBoundPropagation(); void getConstraintTightenings( List &tightenings ) const; @@ -137,9 +151,12 @@ class NetworkLevelReasoner Map _layerSizes; Map _neuronToActivationFunction; double **_weights; + double **_positiveWeights; + double **_negativeWeights; Map _bias; unsigned _maxLayerSize; + unsigned _inputLayerSize; double *_work1; double *_work2; @@ -153,6 +170,8 @@ class NetworkLevelReasoner */ Map _indexToWeightedSumVariable; Map _indexToActivationResultVariable; + Map _weightedSumVariableToIndex; + Map _activationResultVariableToIndex; /* Store the assignment to all variables when evaluate() is called @@ -161,12 +180,33 @@ class NetworkLevelReasoner Map _indexToActivationResultAssignment; /* - Work space for bound tightening. + Store eliminated variables + */ + Map _eliminatedWeightedSumVariables; + Map _eliminatedActivationResultVariables; + + /* + Work space for bound tightening */ double **_lowerBoundsWeightedSums; double **_upperBoundsWeightedSums; double **_lowerBoundsActivations; double **_upperBoundsActivations; + + /* + Work space for symbolic bound propagation + */ + double *_currentLayerLowerBounds; + double *_currentLayerUpperBounds; + double *_currentLayerLowerBias; + double *_currentLayerUpperBias; + + double *_previousLayerLowerBounds; + double *_previousLayerUpperBounds; + double *_previousLayerLowerBias; + double *_previousLayerUpperBias; + + static void log( const String &message ); }; #endif // __NetworkLevelReasoner_h__ diff --git a/src/engine/Preprocessor.cpp b/src/engine/Preprocessor.cpp index b7896822c..69b0e34d2 100644 --- a/src/engine/Preprocessor.cpp +++ b/src/engine/Preprocessor.cpp @@ -22,12 +22,8 @@ #include "Preprocessor.h" #include "MarabouError.h" #include "Statistics.h" -#include "SymbolicBoundTightener.h" #include "Tightening.h" -// TODO: get rid of this include -#include "ReluConstraint.h" - #ifdef _WIN32 #undef INFINITE #endif @@ -590,6 +586,13 @@ void Preprocessor::eliminateVariables() } } + // Inform the NLR about eliminated variables + if ( _preprocessed._networkLevelReasoner ) + { + for ( const auto &fixed : _fixedVariables ) + _preprocessed._networkLevelReasoner->eliminateVariable( fixed.first, fixed.second ); + } + // Compute the new variable indices, after the elimination of fixed variables int offset = 0; for ( unsigned i = 0; i < _preprocessed.getNumberOfVariables(); ++i ) @@ -661,17 +664,6 @@ void Preprocessor::eliminateVariables() if ( (*constraint)->constraintObsolete() ) { - if ( _preprocessed._sbt ) - { - if ( !(*constraint)->supportsSymbolicBoundTightening() ) - throw MarabouError( MarabouError::SYMBOLIC_BOUND_TIGHTENER_UNSUPPORTED_CONSTRAINT_TYPE ); - - ReluConstraint *relu = (ReluConstraint *)(*constraint); - unsigned b = relu->getB(); - SymbolicBoundTightener::NodeIndex nodeIndex = _preprocessed._sbt->nodeIndexFromB( b ); - _preprocessed._sbt->setEliminatedRelu( nodeIndex._layer, nodeIndex._neuron, relu->getPhaseStatus() ); - } - if ( _statistics ) _statistics->ppIncNumConstraintsRemoved(); @@ -694,10 +686,6 @@ void Preprocessor::eliminateVariables() } } - // Let the SBT know of changes in indices and merged variables - if ( _preprocessed._sbt ) - _preprocessed._sbt->updateVariableIndices( _oldIndexToNewIndex, _mergedVariables, _fixedVariables ); - // Let the NLR know of changes in indices and merged variables if ( _preprocessed._networkLevelReasoner )
_preprocessed._networkLevelReasoner->updateVariableIndices( _oldIndexToNewIndex, _mergedVariables ); diff --git a/src/engine/SymbolicBoundTightener.cpp b/src/engine/SymbolicBoundTightener.cpp deleted file mode 100644 index 419365dba..000000000 --- a/src/engine/SymbolicBoundTightener.cpp +++ /dev/null @@ -1,894 +0,0 @@ -/********************* */ -/*! \file SymbolicBoundTightener.cpp - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - - **/ - -#include "Debug.h" -#include "FloatUtils.h" -#include "MStringf.h" -#include "MarabouError.h" -#include "SymbolicBoundTightener.h" - -SymbolicBoundTightener::SymbolicBoundTightener() - : _layerSizes( NULL ) - , _biases( NULL ) - , _weights( NULL ) - , _lowerBounds( NULL ) - , _upperBounds( NULL ) - , _currentLayerLowerBounds( NULL ) - , _currentLayerUpperBounds( NULL ) - , _currentLayerLowerBias( NULL ) - , _currentLayerUpperBias( NULL ) - , _previousLayerLowerBounds( NULL ) - , _previousLayerUpperBounds( NULL ) - , _previousLayerLowerBias( NULL ) - , _previousLayerUpperBias( NULL ) -{ - if ( GlobalConfiguration::USE_COLUMN_MERGING_EQUATIONS ) - throw MarabouError( MarabouError::SYMBOLIC_BOUND_TIGHTENER_OPTION_NOT_SUPPORTED, - "Cannot run SBT with Column Merging!" ); -} - -SymbolicBoundTightener::~SymbolicBoundTightener() -{ - freeMemoryIfNeeded(); -} - -void SymbolicBoundTightener::freeMemoryIfNeeded() -{ - if ( _biases ) - { - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - if ( _biases[i] ) - { - delete[] _biases[i]; - _biases[i] = NULL; - } - } - - delete[] _biases; - _biases = NULL; - } - - if ( _weights ) - { - for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) - { - if ( _weights[i]._positiveValues ) - { - delete[] _weights[i]._positiveValues; - _weights[i]._positiveValues = NULL; - } - - if ( _weights[i]._negativeValues ) - { - delete[] _weights[i]._negativeValues; - _weights[i]._negativeValues = NULL; - } - } - - delete[] _weights; - _weights = NULL; - } - - if ( _lowerBounds ) - { - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - if ( _lowerBounds[i] ) - { - delete[] _lowerBounds[i]; - _lowerBounds[i] = NULL; - } - } - - delete[] _lowerBounds; - _lowerBounds = NULL; - } - - if ( _upperBounds ) - { - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - if ( _upperBounds[i] ) - { - delete[] _upperBounds[i]; - _upperBounds[i] = NULL; - } - } - - delete[] _upperBounds; - _upperBounds = NULL; - } - - if ( _currentLayerLowerBounds ) - { - delete[] _currentLayerLowerBounds; - _currentLayerLowerBounds = NULL; - } - - if ( _currentLayerUpperBounds ) - { - delete[] _currentLayerUpperBounds; - _currentLayerUpperBounds = NULL; - } - - if ( _currentLayerLowerBias ) - { - delete[] _currentLayerLowerBias; - _currentLayerLowerBias = NULL; - } - - if ( _currentLayerUpperBias ) - { - delete[] _currentLayerUpperBias; - _currentLayerUpperBias = NULL; - } - - if ( _previousLayerLowerBounds ) - { - delete[] _previousLayerLowerBounds; - _previousLayerLowerBounds = NULL; - } - - if ( _previousLayerUpperBounds ) - { - delete[] _previousLayerUpperBounds; - _previousLayerUpperBounds = NULL; - } - - if ( _previousLayerLowerBias ) - { - delete[] _previousLayerLowerBias; 
- _previousLayerLowerBias = NULL; - } - - if ( _previousLayerUpperBias ) - { - delete[] _previousLayerUpperBias; - _previousLayerUpperBias = NULL; - } - - if ( _layerSizes ) - { - delete[] _layerSizes; - _layerSizes = NULL; - } - - _inputLowerBounds.clear(); - _inputUpperBounds.clear(); -} - -void SymbolicBoundTightener::setNumberOfLayers( unsigned layers ) -{ - freeMemoryIfNeeded(); - - _numberOfLayers = layers; - _layerSizes = new unsigned[layers]; - - std::fill_n( _layerSizes, layers, 0 ); -} - -void SymbolicBoundTightener::setLayerSize( unsigned layer, unsigned layerSize ) -{ - _layerSizes[layer] = layerSize; -} - -void SymbolicBoundTightener::allocateWeightAndBiasSpace() -{ - // Allocate biases - _biases = new double *[_numberOfLayers]; - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - ASSERT( _layerSizes[i] > 0 ); - _biases[i] = new double[_layerSizes[i]]; - - std::fill_n( _biases[i], _layerSizes[i], 0 ); - } - - // Allocate weights - _weights = new WeightMatrix[_numberOfLayers - 1]; - for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) - { - // The rows represent the sources, the columns the targets - _weights[i]._rows = _layerSizes[i]; - _weights[i]._columns = _layerSizes[i+1]; - _weights[i]._positiveValues = new double[_weights[i]._rows * _weights[i]._columns]; - _weights[i]._negativeValues = new double[_weights[i]._rows * _weights[i]._columns]; - - std::fill_n( _weights[i]._positiveValues, _weights[i]._rows * _weights[i]._columns, 0 ); - std::fill_n( _weights[i]._negativeValues, _weights[i]._rows * _weights[i]._columns, 0 ); - } - - _lowerBounds = new double *[_numberOfLayers]; - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - ASSERT( _layerSizes[i] > 0 ); - _lowerBounds[i] = new double[_layerSizes[i]]; - - std::fill_n( _lowerBounds[i], _layerSizes[i], 0 ); - } - - _upperBounds = new double *[_numberOfLayers]; - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - ASSERT( _layerSizes[i] > 0 ); - _upperBounds[i] = new double[_layerSizes[i]]; - - std::fill_n( _upperBounds[i], _layerSizes[i], FloatUtils::infinity() ); - } - - // Allocate work space for the bound computation - _maxLayerSize = 0; - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - if ( _maxLayerSize < _layerSizes[i] ) - _maxLayerSize = _layerSizes[i]; - - _inputLayerSize = _layerSizes[0]; - - _inputNeuronToIndex.clear(); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - _inputNeuronToIndex[i] = i; - - _currentLayerLowerBounds = new double[_maxLayerSize * _inputLayerSize]; - _currentLayerUpperBounds = new double[_maxLayerSize * _inputLayerSize]; - _currentLayerLowerBias = new double[_maxLayerSize]; - _currentLayerUpperBias = new double[_maxLayerSize]; - - _previousLayerLowerBounds = new double[_maxLayerSize * _inputLayerSize]; - _previousLayerUpperBounds = new double[_maxLayerSize * _inputLayerSize]; - _previousLayerLowerBias = new double[_maxLayerSize]; - _previousLayerUpperBias = new double[_maxLayerSize]; -} - -void SymbolicBoundTightener::setBias( unsigned layer, unsigned neuron, double bias ) -{ - //TODO: check that layer and neuron are not off bounds - _biases[layer][neuron] = bias; -} - -void SymbolicBoundTightener::setWeight( unsigned sourceLayer, unsigned sourceNeuron, unsigned targetNeuron, double weight ) -{ - if ( weight > 0 ) - _weights[sourceLayer]._positiveValues[sourceNeuron * _weights[sourceLayer]._columns + targetNeuron] = weight; - else - _weights[sourceLayer]._negativeValues[sourceNeuron * _weights[sourceLayer]._columns + targetNeuron] = weight; -} - -void 
SymbolicBoundTightener::setInputLowerBound( unsigned neuron, double bound ) -{ - ASSERT( _inputNeuronToIndex.exists( neuron ) ); - _inputLowerBounds[_inputNeuronToIndex[neuron]] = bound; -} - -void SymbolicBoundTightener::setInputUpperBound( unsigned neuron, double bound ) -{ - ASSERT( _inputNeuronToIndex.exists( neuron ) ); - _inputUpperBounds[_inputNeuronToIndex[neuron]] = bound; -} - -void SymbolicBoundTightener::run() -{ - run( GlobalConfiguration::USE_LINEAR_CONCRETIZATION ); -} - -void SymbolicBoundTightener::run( bool useLinearConcretization ) -{ - /* - Initialize the symbolic bounds for the first layer. Each variable has symbolic - upper and lower bound 1 for itself, 0 for all other varibales. - The input layer has no biases. - */ - std::fill_n( _previousLayerLowerBounds, _maxLayerSize * _inputLayerSize, 0 ); - std::fill_n( _previousLayerUpperBounds, _maxLayerSize * _inputLayerSize, 0 ); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - _previousLayerLowerBounds[i * _inputLayerSize + i] = 1; - _previousLayerUpperBounds[i * _inputLayerSize + i] = 1; - } - std::fill_n( _previousLayerLowerBias, _maxLayerSize, 0 ); - std::fill_n( _previousLayerUpperBias, _maxLayerSize, 0 ); - - log( "Initializing.\n\tLB matrix:\n" ); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - log( Stringf( "%.2lf ", _previousLayerLowerBounds[i*_inputLayerSize + j] ) ); - } - log( "\n" ); - } - log( "\nUB matrix:\n" ); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - log( Stringf( "%.2lf ", _previousLayerUpperBounds[i*_inputLayerSize + j] ) ); - } - log( "\n" ); - } - - for ( unsigned currentLayer = 1; currentLayer < _numberOfLayers; ++currentLayer ) - { - log( Stringf( "\nStarting work on layer %u\n", currentLayer ) ); - - unsigned currentLayerSize = _layerSizes[currentLayer]; - unsigned previousLayerSize = _layerSizes[currentLayer - 1]; - - /* - Computing symbolic bounds for layer i, based on layer i-1. - We assume that the bounds for the previous layer have been computed. - */ - - std::fill_n( _currentLayerLowerBounds, _maxLayerSize * _inputLayerSize, 0 ); - std::fill_n( _currentLayerUpperBounds, _maxLayerSize * _inputLayerSize, 0 ); - - // Grab the weights - WeightMatrix weights = _weights[currentLayer-1]; - - log( "Positive weights:\n" ); - for ( unsigned i = 0; i < _layerSizes[currentLayer - 1]; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _layerSizes[currentLayer]; ++j ) - { - log( Stringf( "%.2lf ", weights._positiveValues[i*_layerSizes[currentLayer] + j] ) ); - } - log( "\n" ); - } - log( "\nNegative weights:\n" ); - for ( unsigned i = 0; i < _layerSizes[currentLayer - 1]; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _layerSizes[currentLayer]; ++j ) - { - log( Stringf( "%.2lf ", weights._negativeValues[i*_layerSizes[currentLayer] + j] ) ); - } - log( "\n" ); - } - - /* - Perform the multiplication. 
- - newUB = oldUB * posWeights + oldLB * negWeights - newLB = oldUB * negWeights + oldLB * posWeights - - dimensions for oldUB and oldLB: inputLayerSize x previousLayerSize - dimensions for posWeights and negWeights: previousLayerSize x layerSize - - newUB, newLB dimensions: inputLayerSize x layerSize - */ - - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - for ( unsigned j = 0; j < currentLayerSize; ++j ) - { - for ( unsigned k = 0; k < previousLayerSize; ++k ) - { - _currentLayerLowerBounds[i * currentLayerSize + j] += - _previousLayerUpperBounds[i * previousLayerSize + k] * - weights._negativeValues[k * currentLayerSize + j]; - - _currentLayerLowerBounds[i * currentLayerSize + j] += - _previousLayerLowerBounds[i * previousLayerSize + k] * - weights._positiveValues[k * currentLayerSize + j]; - - _currentLayerUpperBounds[i * currentLayerSize + j] += - _previousLayerUpperBounds[i * previousLayerSize + k] * - weights._positiveValues[k * currentLayerSize + j]; - - _currentLayerUpperBounds[i * currentLayerSize + j] += - _previousLayerLowerBounds[i * previousLayerSize + k] * - weights._negativeValues[k * currentLayerSize + j]; - } - } - } - - /* - Compute the biases for the new layer - */ - for ( unsigned j = 0; j < currentLayerSize; ++j ) - { - _currentLayerLowerBias[j] = _biases[currentLayer][j]; - _currentLayerUpperBias[j] = _biases[currentLayer][j]; - - // Add the weighted bias from the previous layer - for ( unsigned k = 0; k < previousLayerSize; ++k ) - { - double weight = weights._positiveValues[k * currentLayerSize + j] + weights._negativeValues[k * currentLayerSize + j]; - - if ( weight > 0 ) - { - _currentLayerLowerBias[j] += _previousLayerLowerBias[k] * weight; - _currentLayerUpperBias[j] += _previousLayerUpperBias[k] * weight; - } - else - { - _currentLayerLowerBias[j] += _previousLayerUpperBias[k] * weight; - _currentLayerUpperBias[j] += _previousLayerLowerBias[k] * weight; - } - } - } - - log( "\nAfter matrix multiplication, newLB is:\n" ); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _layerSizes[currentLayer]; ++j ) - { - log( Stringf( "%.2lf ", _currentLayerLowerBounds[i*_layerSizes[currentLayer] + j] ) ); - } - log( "\n" ); - } - log( "\nnew UB is:\n" ); - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - log( "\t" ); - for ( unsigned j = 0; j < _layerSizes[currentLayer]; ++j ) - { - log( Stringf( "%.2lf ", _currentLayerUpperBounds[i*_layerSizes[currentLayer] + j] ) ); - } - log( "\n" ); - } - - // We now have the symbolic representation for the new layer. Next, we compute new lower - // and upper bounds for it. For each of these bounds, we compute an upper bound and a lower - // bound. 
- // - // newUB, newLB dimensions: inputLayerSize x layerSize - // - for ( unsigned i = 0; i < currentLayerSize; ++i ) - { - // lbLb: the lower bound for the expression of the lower bound - // lbUb: the upper bound for the expression of the lower bound - // etc - - double lbLb = 0; - double lbUb = 0; - double ubLb = 0; - double ubUb = 0; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - double entry = _currentLayerLowerBounds[j * currentLayerSize + i]; - - if ( entry >= 0 ) - { - lbLb += ( entry * _inputLowerBounds[j] ); - lbUb += ( entry * _inputUpperBounds[j] ); - } - else - { - lbLb += ( entry * _inputUpperBounds[j] ); - lbUb += ( entry * _inputLowerBounds[j] ); - } - - lbLb -= GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - lbUb += GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - - entry = _currentLayerUpperBounds[j * currentLayerSize + i]; - - if ( entry >= 0 ) - { - ubLb += ( entry * _inputLowerBounds[j] ); - ubUb += ( entry * _inputUpperBounds[j] ); - } - else - { - ubLb += ( entry * _inputUpperBounds[j] ); - ubUb += ( entry * _inputLowerBounds[j] ); - } - - ubLb -= GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - ubUb += GlobalConfiguration::SYMBOLIC_TIGHTENING_ROUNDING_CONSTANT; - } - - // Add the network bias to all bounds - lbLb += _currentLayerLowerBias[i]; - lbUb += _currentLayerLowerBias[i]; - ubLb += _currentLayerUpperBias[i]; - ubUb += _currentLayerUpperBias[i]; - - log( Stringf( "Neuron %u: Computed concrete lb: %lf, ub: %lf\n", i, lbLb, ubUb ) ); - - // Handle the ReLU activation. We know that: - // lbLb <= true LB <= lbUb - // ubLb <= true UB <= ubUb - - if ( currentLayer < _numberOfLayers - 1 ) - { - NodeIndex reluIndex( currentLayer, i ); - - ReluConstraint::PhaseStatus reluPhase = ReluConstraint::PHASE_NOT_FIXED; - if ( _nodeIndexToEliminatedReluState.exists( reluIndex ) ) - { - reluPhase = _nodeIndexToEliminatedReluState[reluIndex]; - ASSERT( reluPhase != ReluConstraint::PHASE_NOT_FIXED ); - } - else if ( _nodeIndexToReluState.exists( reluIndex ) ) - { - reluPhase = _nodeIndexToReluState[reluIndex]; - } - - // If the ReLU phase is not fixed yet, do the usual propagation: - if ( reluPhase == ReluConstraint::PHASE_NOT_FIXED ) - { - if ( ubUb <= 0 ) - { - // lb <= ub <= 0 - // The ReLU will zero this entry out - lbLb = 0; - lbUb = 0; - ubLb = 0; - ubUb = 0; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _currentLayerLowerBounds[j * currentLayerSize + i] = 0; - _currentLayerUpperBounds[j * currentLayerSize + i] = 0; - } - _currentLayerLowerBias[i] = 0; - _currentLayerUpperBias[i] = 0; - } - else if ( lbLb >= 0 ) - { - // 0 <= lb <= ub - // The ReLU will not affect this entry - - log( "SBT: eliminated nothing!\n" ); - } - else - { - // lbLb < 0 < ubUb - // The ReLU might affect this entry, we need to figure out how - - if ( ubLb < 0 ) - { - // ubLb < 0 < ubUb - if ( useLinearConcretization ) - { - // Concretize the upper bound using the Ehler's-like sapproximation - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _currentLayerUpperBounds[j * currentLayerSize + i] = - _currentLayerUpperBounds[j * currentLayerSize + i] * ubUb / ( ubUb - ubLb ); - - // Do the same for the bias, and then adjust - _currentLayerUpperBias[i] = _currentLayerUpperBias[i] * ubUb / ( ubUb - ubLb ); - _currentLayerUpperBias[i] -= ubUb * ubLb / ( ubUb - ubLb ); - } - else - { - // No linear concretization // - - // The upper bound range goes below 0, we we need to zero it out - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - 
_currentLayerUpperBounds[j * currentLayerSize + i] = 0; - - // We keep the concrete maxiaml value of the upper bound as the bias for this layer - _currentLayerUpperBias[i] = ubUb; - } - } - else - { - log( "SBT: did not eliminate upper!\n" ); - } - - if ( useLinearConcretization ) - { - if ( lbUb < 0 ) - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _currentLayerLowerBounds[j * currentLayerSize + i] = 0; - - _currentLayerLowerBias[i] = 0; - } - else - { - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _currentLayerLowerBounds[j * currentLayerSize + i] = - _currentLayerLowerBounds[j * currentLayerSize + i] * lbUb / ( lbUb - lbLb ); - - _currentLayerLowerBias[i] = _currentLayerLowerBias[i] * lbUb / ( lbUb - lbLb ); - } - } - else - { - // No linear concretization // - - // The lower bound can be negative, so it is zeroed out also - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - _currentLayerLowerBounds[j * currentLayerSize + i] = 0; - - _currentLayerLowerBias[i] = 0; - } - - lbLb = 0; - } - - log( Stringf( "\tAfter ReLU: concrete lb: %lf, ub: %lf\n", lbLb, ubUb ) ); - } - else - { - // The phase of this ReLU is fixed! - if ( reluPhase == ReluConstraint::PHASE_ACTIVE ) - { - // printf( "Relu <%u,%u> is ACTIVE, leaving equations as is\n", reluIndex._layer, reluIndex._neuron ); - // Active ReLU, bounds are propagated as is - } - else - { - // printf( "Relu <%u,%u> is INACTIVE, zeroing out equations\n", reluIndex._layer, reluIndex._neuron ); - - // Inactive ReLU, returns zero - lbLb = 0; - lbUb = 0; - ubLb = 0; - ubUb = 0; - - for ( unsigned j = 0; j < _inputLayerSize; ++j ) - { - _currentLayerLowerBounds[j * currentLayerSize + i] = 0; - _currentLayerUpperBounds[j * currentLayerSize + i] = 0; - } - _currentLayerLowerBias[i] = 0; - _currentLayerUpperBias[i] = 0; - } - - log( Stringf( "\tAfter phase-fixed ReLU: concrete lb: %lf, ub: %lf\n", lbLb, ubUb ) ); - } - } - - // Store the bounds for this neuron - _lowerBounds[currentLayer][i] = lbLb; - _upperBounds[currentLayer][i] = ubUb; - } - - log( "Dumping current layer upper bounds, before copy:\n" ); - for ( unsigned i = 0; i < _maxLayerSize * _inputLayerSize; ++i ) - log( Stringf( "%.5lf ", _currentLayerUpperBounds[i] ) ); - log( "\n\n" ); - - // Prepare for next iteration - memcpy( _previousLayerLowerBounds, _currentLayerLowerBounds, sizeof(double) * _maxLayerSize * _inputLayerSize ); - memcpy( _previousLayerUpperBounds, _currentLayerUpperBounds, sizeof(double) * _maxLayerSize * _inputLayerSize ); - memcpy( _previousLayerLowerBias, _currentLayerLowerBias, sizeof(double) * _maxLayerSize ); - memcpy( _previousLayerUpperBias, _currentLayerUpperBias, sizeof(double) * _maxLayerSize ); - } -} - -double SymbolicBoundTightener::getLowerBound( unsigned layer, unsigned neuron ) const -{ - return _lowerBounds[layer][neuron]; -} - -double SymbolicBoundTightener::getUpperBound( unsigned layer, unsigned neuron ) const -{ - return _upperBounds[layer][neuron]; -} - -void SymbolicBoundTightener::log( const String &message ) -{ - if ( GlobalConfiguration::SYMBOLIC_BOUND_TIGHTENER_LOGGING ) - printf( "%s", message.ascii() ); -} - -void SymbolicBoundTightener::clearReluStatuses() -{ - _nodeIndexToReluState.clear(); -} - -void SymbolicBoundTightener::setReluStatus( unsigned layer, unsigned neuron, ReluConstraint::PhaseStatus status ) -{ - _nodeIndexToReluState[NodeIndex( layer, neuron )] = status; -} - -void SymbolicBoundTightener::setReluBVariable( unsigned layer, unsigned neuron, unsigned b ) -{ - _nodeIndexToBVariable[NodeIndex( layer, neuron )] = 
b; - _bVariableToNodeIndex[b] = NodeIndex( layer, neuron ); -} - -void SymbolicBoundTightener::setReluFVariable( unsigned layer, unsigned neuron, unsigned f ) -{ - _nodeIndexToFVariable[NodeIndex( layer, neuron )] = f; -} - -SymbolicBoundTightener::NodeIndex SymbolicBoundTightener::nodeIndexFromB( unsigned b ) const -{ - if ( !_bVariableToNodeIndex.exists( b ) ) - throw MarabouError( MarabouError::SYMBOLIC_BOUND_TIGHTENER_UNKNOWN_VARIABLE_INDEX ); - - return _bVariableToNodeIndex.at( b ); -} - -void SymbolicBoundTightener::setEliminatedRelu( unsigned layer, unsigned neuron, ReluConstraint::PhaseStatus status ) -{ - ASSERT( status != ReluConstraint::PHASE_NOT_FIXED ); - _nodeIndexToEliminatedReluState[NodeIndex( layer, neuron )] = status; -} - -void SymbolicBoundTightener::updateVariableIndices( const Map &oldIndexToNewIndex, - const Map &mergedVariables, - const Map &fixedVariableValues ) -{ - // First, do a pass to handle any merged variables - auto bIt = _nodeIndexToBVariable.begin(); - while ( bIt != _nodeIndexToBVariable.end() ) - { - unsigned b = bIt->second; - - if ( mergedVariables.exists( b ) ) - bIt->second = mergedVariables[b]; - - ++bIt; - } - - auto fIt = _nodeIndexToFVariable.begin(); - while ( fIt != _nodeIndexToFVariable.end() ) - { - unsigned f = fIt->second; - - if ( mergedVariables.exists( f ) ) - fIt->second = mergedVariables[f]; - - ++fIt; - } - - // Now handle re-indexing - bIt = _nodeIndexToBVariable.begin(); - while ( bIt != _nodeIndexToBVariable.end() ) - { - unsigned b = bIt->second; - - if ( !oldIndexToNewIndex.exists( b ) ) - { - // This variable has been eliminated, remove from map - bIt = _nodeIndexToBVariable.erase( bIt ); - } - else - { - if ( oldIndexToNewIndex[b] == b ) - { - // Index hasn't changed, skip - } - else - { - // Index has changed - bIt->second = oldIndexToNewIndex[b]; - } - - ++bIt; - continue; - } - } - - fIt = _nodeIndexToFVariable.begin(); - while ( fIt != _nodeIndexToFVariable.end() ) - { - unsigned f = fIt->second; - if ( !oldIndexToNewIndex.exists( f ) ) - { - // This variable has been eliminated, remove from map - fIt = _nodeIndexToFVariable.erase( fIt ); - } - else - { - if ( oldIndexToNewIndex[f] == f ) - { - // Index hasn't changed, skip - } - else - { - // Index has changed - fIt->second = oldIndexToNewIndex[f]; - } - - ++fIt; - continue; - } - } - - // Recreate the inverse B map - _bVariableToNodeIndex.clear(); - for ( const auto &entry : _nodeIndexToBVariable ) - _bVariableToNodeIndex[entry.second] = entry.first; - - // Set the lower and upper bound for any input variable that has become fixed - for ( unsigned i = 0; i < _inputLayerSize; ++i ) - { - if ( mergedVariables.exists( i ) ) - throw MarabouError( MarabouError::MERGED_INPUT_VARIABLE ); - - if ( !oldIndexToNewIndex.exists( i ) ) - { - log( Stringf( "Input %u permanently fixed to value %lf\n", i, fixedVariableValues[i] ) ); - - // The i'th variable has been fixed. 
- _inputLowerBounds[i] = fixedVariableValues[i]; - _inputUpperBounds[i] = fixedVariableValues[i]; - - // If input neuron 3 has become fixed, later when we hear something - // about neuron 3 it will actually refer to neuron 4 - for ( unsigned j = i; j < _inputLayerSize; ++j ) - ++_inputNeuronToIndex[j]; - - _inputNeuronToIndex.erase( _inputNeuronToIndex.size() - 1 ); - } - } -} - -const Map &SymbolicBoundTightener::getNodeIndexToFMapping() const -{ - return _nodeIndexToFVariable; -} - -void SymbolicBoundTightener::storeIntoOther( SymbolicBoundTightener &other ) const -{ - other.freeMemoryIfNeeded(); - - other.setNumberOfLayers( _numberOfLayers ); - - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - other.setLayerSize( i, _layerSizes[i] ); - - other.allocateWeightAndBiasSpace(); - - for ( unsigned i = 0; i < _numberOfLayers - 1; ++i ) - { - unsigned rows = _weights[i]._rows; - unsigned columns = _weights[i]._columns; - - other._weights[i]._rows = rows; - other._weights[i]._columns = columns; - - memcpy( other._weights[i]._positiveValues, _weights[i]._positiveValues, sizeof(double) * rows * columns ); - memcpy( other._weights[i]._negativeValues, _weights[i]._negativeValues, sizeof(double) * rows * columns ); - } - - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - memcpy( other._biases[i], _biases[i], sizeof(double) * _layerSizes[i] ); - } - - other._inputLayerSize = _inputLayerSize; - other._maxLayerSize = _maxLayerSize; - other._inputLowerBounds = _inputLowerBounds; - other._inputUpperBounds = _inputUpperBounds; - - for ( unsigned i = 0; i < _numberOfLayers; ++i ) - { - memcpy( other._lowerBounds[i], _lowerBounds[i], sizeof(double) * _layerSizes[i] ); - memcpy( other._upperBounds[i], _upperBounds[i], sizeof(double) * _layerSizes[i] ); - } - - other._nodeIndexToBVariable = _nodeIndexToBVariable; - other._nodeIndexToFVariable = _nodeIndexToFVariable; - other._bVariableToNodeIndex = _bVariableToNodeIndex; - - other._nodeIndexToReluState = _nodeIndexToReluState; - other._nodeIndexToEliminatedReluState = _nodeIndexToEliminatedReluState; - - other._inputNeuronToIndex = _inputNeuronToIndex; -} - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// diff --git a/src/engine/SymbolicBoundTightener.h b/src/engine/SymbolicBoundTightener.h deleted file mode 100644 index cb8195655..000000000 --- a/src/engine/SymbolicBoundTightener.h +++ /dev/null @@ -1,190 +0,0 @@ -/********************* */ -/*! \file SymbolicBoundTightener.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved. See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#ifndef __SymbolicBoundTightener_h__ -#define __SymbolicBoundTightener_h__ - -#include "MString.h" -#include "Map.h" - -// Todo: remove this include later -#include "ReluConstraint.h" - -/* - A utility class for performing symbolic bound tightening. - It currently makes the following assumptions: - - 1. The network only has ReLU activations functions - 2. The network is fully connected - 3. 
An external caller has stored the weights and topology -*/ - -class SymbolicBoundTightener -{ -public: - struct WeightMatrix - { - double *_positiveValues; - double *_negativeValues; - unsigned _rows; - unsigned _columns; - }; - - struct NodeIndex - { - NodeIndex() - : _layer( 0 ) - , _neuron( 0 ) - { - } - - NodeIndex( unsigned layer, unsigned neuron ) - : _layer( layer ) - , _neuron( neuron ) - { - } - - unsigned _layer; - unsigned _neuron; - - bool operator<( const NodeIndex &other ) const - { - if ( _layer != other._layer ) - return _layer < other._layer; - - return _neuron < other._neuron; - } - }; - - SymbolicBoundTightener(); - ~SymbolicBoundTightener(); - - /* - Initialization methods for reporting the topology of the network: - - - Number of layers - - Layer sizes - - Allocating the required internal memory - - Biases and weights - - Lower and upper bounds for input neurons - */ - void setNumberOfLayers( unsigned layers ); - void setLayerSize( unsigned layer, unsigned layerSize ); - void allocateWeightAndBiasSpace(); - void setBias( unsigned layer, unsigned neuron, double bias ); - void setWeight( unsigned sourceLayer, unsigned sourceNeuron, unsigned targetNeuron, double weight ); - void setInputLowerBound( unsigned neuron, double bound ); - void setInputUpperBound( unsigned neuron, double bound ); - - /* - Setting the connections between the network topology and the simplex variables - */ - void setReluBVariable( unsigned layer, unsigned neuron, unsigned b ); - void setReluFVariable( unsigned layer, unsigned neuron, unsigned f ); - - NodeIndex nodeIndexFromB( unsigned b ) const; - const Map &getNodeIndexToFMapping() const; - - void updateVariableIndices( const Map &oldIndexToNewIndex, - const Map &mergedVariables, - const Map &fixedVariableValues ); - - /* - Report that a ReLU constraint has become permanently fixed (i.e., at decision level 0) - */ - void setEliminatedRelu( unsigned layer, unsigned neuron, ReluConstraint::PhaseStatus status ); - - /* - Prior to running, we can use these methods to report any ReLUs that have becomes - fixed, or to clear previously-reported information - */ - void setReluStatus( unsigned layer, unsigned neuron, ReluConstraint::PhaseStatus status ); - void clearReluStatuses(); - - /* - Running the tool, with or without linear concertization - */ - void run(); - void run( bool useLinearConcretization ); - - /* - After running the tools, these methods will extract the discovered - bounds for every neuron - */ - double getLowerBound( unsigned layer, unsigned neuron ) const; - double getUpperBound( unsigned layer, unsigned neuron ) const; - - /* - Duplicate the tightener - */ - void storeIntoOther( SymbolicBoundTightener &other ) const; - -private: - // The number of layers and their sizes - unsigned _numberOfLayers; - unsigned *_layerSizes; - unsigned _inputLayerSize; - unsigned _maxLayerSize; - - // The network's weights and biases - double **_biases; - WeightMatrix *_weights; - - // Lower and upper bounds for input neurons - Map _inputLowerBounds; - Map _inputUpperBounds; - - // Lower and upper bounds for internal neurons - double **_lowerBounds; - double **_upperBounds; - - // Mapping from ReLUs to simplex variables - Map _nodeIndexToBVariable; - Map _nodeIndexToFVariable; - Map _bVariableToNodeIndex; - - // Information about the phase statuses of ReLU nodes - Map _nodeIndexToReluState; - Map _nodeIndexToEliminatedReluState; - - // To account for input variable renaming as part of preprocessing - Map _inputNeuronToIndex; - - /* - Work space for the 
bound computation - */ - double *_currentLayerLowerBounds; - double *_currentLayerUpperBounds; - double *_currentLayerLowerBias; - double *_currentLayerUpperBias; - - double *_previousLayerLowerBounds; - double *_previousLayerUpperBounds; - double *_previousLayerLowerBias; - double *_previousLayerUpperBias; - - void freeMemoryIfNeeded(); - static void log( const String &message ); -}; - -#endif // __SymbolicBoundTightener_h__ - -// -// Local Variables: -// compile-command: "make -C ../.. " -// tags-file-name: "../../TAGS" -// c-basic-offset: 4 -// End: -// diff --git a/src/engine/tests/Test_NetworkLevelReasoner.h b/src/engine/tests/Test_NetworkLevelReasoner.h index aca17c2b3..251d5c3bc 100644 --- a/src/engine/tests/Test_NetworkLevelReasoner.h +++ b/src/engine/tests/Test_NetworkLevelReasoner.h @@ -207,16 +207,30 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite MockTableau tableau; - // Initialize the input bounds + // Initialize the bounds tableau.setLowerBound( 0, -1 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 1 ); + double large = 1000; + tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); tableau.setUpperBound( 13, large ); + nlr.setTableau( &tableau ); // Initialize - TS_ASSERT_THROWS_NOTHING( nlr.obtainInputBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); @@ -257,14 +271,27 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT_EQUALS( expectedBounds, bounds ); - // Change the input bounds + // Change the current bounds tableau.setLowerBound( 0, -3 ); tableau.setUpperBound( 0, 1 ); tableau.setLowerBound( 1, -1 ); tableau.setUpperBound( 1, 2 ); + tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + tableau.setLowerBound( 7, -large ); tableau.setUpperBound( 7, large ); + tableau.setLowerBound( 8, -large ); tableau.setUpperBound( 8, large ); + tableau.setLowerBound( 9, -large ); tableau.setUpperBound( 9, large ); + tableau.setLowerBound( 10, -large ); tableau.setUpperBound( 10, large ); + tableau.setLowerBound( 11, -large ); tableau.setUpperBound( 11, large ); + tableau.setLowerBound( 12, -large ); tableau.setUpperBound( 12, large ); + tableau.setLowerBound( 13, -large ); tableau.setUpperBound( 13, large ); + // Initialize - 
TS_ASSERT_THROWS_NOTHING( nlr.obtainInputBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); // Perform the tightening pass TS_ASSERT_THROWS_NOTHING( nlr.intervalArithmeticBoundPropagation() ); @@ -304,6 +331,356 @@ class NetworkLevelReasonerTestSuite : public CxxTest::TestSuite TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); TS_ASSERT_EQUALS( expectedBounds2, bounds ); } + + void populateNetworkSBT( NetworkLevelReasoner &nlr, MockTableau &tableau ) + { + /* + 2 R 1 + x0 --- x2 ---> x4 --- x6 + \ / / + 1 \ / / + \/ -1 / + /\ / + 3 / \ / + / \ R / + x1 --- x3 ---> x5 + 1 + */ + + nlr.setNumberOfLayers( 3 ); + + nlr.setLayerSize( 0, 2 ); + nlr.setLayerSize( 1, 2 ); + nlr.setLayerSize( 2, 1 ); + + nlr.allocateMemoryByTopology(); + + // Weights + nlr.setWeight( 0, 0, 0, 2 ); + nlr.setWeight( 0, 0, 1, 1 ); + nlr.setWeight( 0, 1, 0, 3 ); + nlr.setWeight( 0, 1, 1, 1 ); + nlr.setWeight( 1, 0, 0, 1 ); + nlr.setWeight( 1, 1, 0, -1 ); + + // All biases are 0 + nlr.setBias( 0, 0, 0 ); + nlr.setBias( 0, 1, 0 ); + nlr.setBias( 1, 0, 0 ); + nlr.setBias( 1, 1, 0 ); + nlr.setBias( 2, 0, 0 ); + + // Variable indexing + nlr.setActivationResultVariable( 0, 0, 0 ); + nlr.setActivationResultVariable( 0, 1, 1 ); + + nlr.setWeightedSumVariable( 1, 0, 2 ); + nlr.setWeightedSumVariable( 1, 1, 3 ); + nlr.setActivationResultVariable( 1, 0, 4 ); + nlr.setActivationResultVariable( 1, 1, 5 ); + + nlr.setWeightedSumVariable( 2, 0, 6 ); + + // Mark nodes as ReLUs + nlr.setNeuronActivationFunction( 1, 0, NetworkLevelReasoner::ReLU ); + nlr.setNeuronActivationFunction( 1, 1, NetworkLevelReasoner::ReLU ); + + // Very loose bounds for neurons except inputs + double large = 1000000; + + tableau.setLowerBound( 2, -large ); tableau.setUpperBound( 2, large ); + tableau.setLowerBound( 3, -large ); tableau.setUpperBound( 3, large ); + tableau.setLowerBound( 4, -large ); tableau.setUpperBound( 4, large ); + tableau.setLowerBound( 5, -large ); tableau.setUpperBound( 5, large ); + tableau.setLowerBound( 6, -large ); tableau.setUpperBound( 6, large ); + } + + void test_sbt_all_active() + { + NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 : [11, 27] + x2.ub = 2x0 + 3x1 : [11, 27] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + Both ReLUs active, bound survive through activations: + + x4.lb = 2x0 + 3x1 : [11, 27] + x4.ub = 2x0 + 3x1 : [11, 27] + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = x0 + 2x1 : [6, 16] + x6.ub = x0 + 2x1 : [6, 16] + */ + + List expectedBounds({ + Tightening( 2, 11, Tightening::LB ), + Tightening( 2, 27, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 11, Tightening::LB ), + Tightening( 4, 27, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, 6, Tightening::LB ), + Tightening( 6, 16, Tightening::UB ), + }); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : 
bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_active_and_inactive() + { + NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -30 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 30 : [-19, -3] + x2.ub = 2x0 + 3x1 - 30 : [-19, -3] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive, bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds({ + Tightening( 2, -19, Tightening::LB ), + Tightening( 2, -3, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + }); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_active_and_not_fixed() + { + NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0) + nlr.setBias( 1, 0, -15 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is undecided, bound is concretized.
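// A tiny standalone check (an illustration, not part of the added test file) of the
// relaxation applied to this undecided ReLU, assuming the same Ehlers-style concretization
// used by the deleted SymbolicBoundTightener::run(): with concrete pre-activation bounds
// [lb, ub] = [-4, 12], the symbolic upper bound is scaled by ub / ( ub - lb ) and its bias
// is shifted by -lb * ub / ( ub - lb ), which reproduces the 0.75 coefficient and the
// "+ 3" bias term derived in the comment that continues below.
#include <cassert>

int main()
{
    double lb = -4, ub = 12;

    double lambda = ub / ( ub - lb );          // 12 / 16 = 0.75
    double biasShift = -lb * ub / ( ub - lb ); // 4 * 12 / 16 = 3

    assert( lambda == 0.75 );
    assert( biasShift == 3.0 );
    return 0;
}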
+ Coefficient: 12/(12-(-4)) = 12/16 = 0.75 + Second ReLU is active, bounds survive the activation + + x4 range: [0, 12] + x4.lb = 0.75( 2x0 + 3x1 ) - 0.75 * 15 = 1.5x0 + 2.25x1 - 11.25 + x4.ub = 0.75( 2x0 + 3x1 ) - 0.75 * 15 + 3 = 1.5x0 + 2.25x1 - 8.25 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = 0.5x0 + 1.25x1 - 11.25 + x6.ub = 0.5x0 + 1.25x1 - 8.25 + + x6 range: [2 + 1.25 - 11.25 = -8, 3 + 6.25 - 8.25 = 1] = [-8, 1] + */ + + List expectedBounds({ + Tightening( 2, -4, Tightening::LB ), + Tightening( 2, 12, Tightening::UB ), + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 12, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -8, Tightening::LB ), + Tightening( 6, 1, Tightening::UB ), + }); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } + + void test_sbt_active_and_externally_fixed() + { + NetworkLevelReasoner nlr; + MockTableau tableau; + nlr.setTableau( &tableau ); + populateNetworkSBT( nlr, tableau ); + + tableau.setLowerBound( 0, 4 ); + tableau.setUpperBound( 0, 6 ); + tableau.setLowerBound( 1, 1 ); + tableau.setUpperBound( 1, 5 ); + + // Strong negative bias for x2, which is node (1,0). Should make the node unfixed. + nlr.setBias( 1, 0, -15 ); + + // However, one of the ReLU's variables has been eliminated + nlr.eliminateVariable( 2, -3 ); + + // Invoke SBT + TS_ASSERT_THROWS_NOTHING( nlr.obtainCurrentBounds() ); + TS_ASSERT_THROWS_NOTHING( nlr.symbolicBoundPropagation() ); + + /* + Input ranges: + + x0: [4, 6] + x1: [1, 5] + + Layer 1: + + x2.lb = 2x0 + 3x1 - 15 : [-4, 12] + x2.ub = 2x0 + 3x1 - 15 : [-4, 12] + + x3.lb = x0 + x1 : [5, 11] + x3.ub = x0 + x1 : [5, 11] + + First ReLU is inactive (set externally), bounds get zeroed + Second ReLU is active, bounds survive the activation + + x4.lb = 0 + x4.ub = 0 + + x5.lb = x0 + x1 : [5, 11] + x5.ub = x0 + x1 : [5, 11] + + Layer 2: + + x6.lb = - x0 - x1 : [-11, -5] + x6.ub = - x0 - x1 : [-11, -5] + */ + + List expectedBounds({ + // x2 does not appear, because it has been eliminated + + Tightening( 3, 5, Tightening::LB ), + Tightening( 3, 11, Tightening::UB ), + + Tightening( 4, 0, Tightening::LB ), + Tightening( 4, 0, Tightening::UB ), + Tightening( 5, 5, Tightening::LB ), + Tightening( 5, 11, Tightening::UB ), + + Tightening( 6, -11, Tightening::LB ), + Tightening( 6, -5, Tightening::UB ), + }); + + List bounds; + TS_ASSERT_THROWS_NOTHING( nlr.getConstraintTightenings( bounds ) ); + + TS_ASSERT_EQUALS( expectedBounds.size(), bounds.size() ); + for ( const auto &bound : bounds ) + TS_ASSERT( expectedBounds.exists( bound ) ); + } }; // diff --git a/src/engine/tests/Test_SymbolicBoundTightener.h b/src/engine/tests/Test_SymbolicBoundTightener.h deleted file mode 100644 index 82b7082ea..000000000 --- a/src/engine/tests/Test_SymbolicBoundTightener.h +++ /dev/null @@ -1,301 +0,0 @@ -/********************* */ -/*! \file Test_SymbolicBoundTightener.h - ** \verbatim - ** Top contributors (to current version): - ** Guy Katz - ** This file is part of the Marabou project. - ** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS - ** in the top-level source directory) and their institutional affiliations. - ** All rights reserved.
See the file COPYING in the top-level source - ** directory for licensing information.\endverbatim - ** - ** [[ Add lengthier description here ]] - -**/ - -#include - -#include "FloatUtils.h" -#include "SymbolicBoundTightener.h" - -class MockForSymbolicBoundTightener -{ -public: -}; - -class SymbolicBoundTightenerTestSuite : public CxxTest::TestSuite -{ -public: - MockForSymbolicBoundTightener *mock; - - void setUp() - { - TS_ASSERT( mock = new MockForSymbolicBoundTightener ); - } - - void tearDown() - { - TS_ASSERT_THROWS_NOTHING( delete mock ); - } - - void test_all_lower_bounds_positive() - { - // This simple example from the ReluVal paper - - SymbolicBoundTightener sbt; - - sbt.setNumberOfLayers( 3 ); - sbt.setLayerSize( 0, 2 ); - sbt.setLayerSize( 1, 2 ); - sbt.setLayerSize( 2, 1 ); - - sbt.allocateWeightAndBiasSpace(); - - // All biases are 0 - sbt.setBias( 0, 0, 0 ); - sbt.setBias( 0, 1, 0 ); - sbt.setBias( 1, 0, 0 ); - sbt.setBias( 1, 1, 0 ); - sbt.setBias( 2, 0, 0 ); - - // Weights - sbt.setWeight( 0, 0, 0, 2 ); - sbt.setWeight( 0, 0, 1, 1 ); - sbt.setWeight( 0, 1, 0, 3 ); - sbt.setWeight( 0, 1, 1, 1 ); - sbt.setWeight( 1, 0, 0, 1 ); - sbt.setWeight( 1, 1, 0, -1 ); - - // Initial bounds - sbt.setInputLowerBound( 0, 4 ); - sbt.setInputUpperBound( 0, 6 ); - sbt.setInputLowerBound( 1, 1 ); - sbt.setInputUpperBound( 1, 5 ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run() ); - - // Expected range: [6, 16], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < 6 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > 6 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > 16 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < 16 + 0.001 ); - } - - void test_negative_lower_bounds_get_zeroed() - { - // This simple example from the ReluVal paper - - SymbolicBoundTightener sbt; - - sbt.setNumberOfLayers( 3 ); - sbt.setLayerSize( 0, 2 ); - sbt.setLayerSize( 1, 2 ); - sbt.setLayerSize( 2, 1 ); - - sbt.allocateWeightAndBiasSpace(); - - // All biases are 0 - sbt.setBias( 0, 0, 0 ); - sbt.setBias( 0, 1, 0 ); - sbt.setBias( 1, 0, -15 ); // Strong negative bias for node (1,0) - sbt.setBias( 1, 1, 0 ); - sbt.setBias( 2, 0, 0 ); - - // Weights - sbt.setWeight( 0, 0, 0, 2 ); - sbt.setWeight( 0, 0, 1, 1 ); - sbt.setWeight( 0, 1, 0, 3 ); - sbt.setWeight( 0, 1, 1, 1 ); - sbt.setWeight( 1, 0, 0, 1 ); - sbt.setWeight( 1, 1, 0, -1 ); - - // Initial bounds - sbt.setInputLowerBound( 0, 4 ); - sbt.setInputUpperBound( 0, 6 ); - sbt.setInputLowerBound( 1, 1 ); - sbt.setInputUpperBound( 1, 5 ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run( false ) ); // Test with costant concretization - - // Expected range: [-11, 7], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < -11 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > -11 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > 7 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < 7 + 0.001 ); - } - - void test_negative_lower_bounds_and_upper_bounds_get_zeroed() - { - // This simple example from the ReluVal paper - - SymbolicBoundTightener sbt; - - sbt.setNumberOfLayers( 3 ); - sbt.setLayerSize( 0, 2 ); - sbt.setLayerSize( 1, 2 ); - sbt.setLayerSize( 2, 1 ); - - sbt.allocateWeightAndBiasSpace(); - - // All biases are 0 - sbt.setBias( 0, 0, 0 ); - sbt.setBias( 0, 1, 0 ); - sbt.setBias( 1, 0, -30 ); // Strong negative bias for node (1,0) - sbt.setBias( 1, 1, 0 ); - sbt.setBias(2, 0, 0 ); - - // Weights - sbt.setWeight( 0, 0, 0, 2 ); - sbt.setWeight( 0, 0, 1, 1 ); - sbt.setWeight( 0, 1, 0, 3 ); - sbt.setWeight( 0, 1, 1, 1 ); - sbt.setWeight( 1, 0, 0, 1 ); - sbt.setWeight( 
1, 1, 0, -1 ); - - // Initial bounds - sbt.setInputLowerBound( 0, 4 ); - sbt.setInputUpperBound( 0, 6 ); - sbt.setInputLowerBound( 1, 1 ); - sbt.setInputUpperBound( 1, 5 ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run( false ) ); // Test with costant concretization - - // Expected range: [-11, -5], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < -11 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > -11 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > -5 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < -5 + 0.001 ); - } - - void test_fixed_relus() - { - // This simple example from the ReluVal paper - - SymbolicBoundTightener sbt; - - sbt.setNumberOfLayers( 3 ); - sbt.setLayerSize( 0, 2 ); - sbt.setLayerSize( 1, 2 ); - sbt.setLayerSize( 2, 1 ); - - sbt.allocateWeightAndBiasSpace(); - - // All biases are 0 - sbt.setBias( 0, 0, 0 ); - sbt.setBias( 0, 1, 0 ); - sbt.setBias( 1, 0, -15 ); // Strong negative bias for node (1,0) - sbt.setBias( 1, 1, 0 ); - sbt.setBias( 2, 0, 0 ); - - // Weights - sbt.setWeight( 0, 0, 0, 2 ); - sbt.setWeight( 0, 0, 1, 1 ); - sbt.setWeight( 0, 1, 0, 3 ); - sbt.setWeight( 0, 1, 1, 1 ); - sbt.setWeight( 1, 0, 0, 1 ); - sbt.setWeight( 1, 1, 0, -1 ); - - // Initial bounds - sbt.setInputLowerBound( 0, 4 ); - sbt.setInputUpperBound( 0, 6 ); - sbt.setInputLowerBound( 1, 1 ); - sbt.setInputUpperBound( 1, 5 ); - - /// Case 1: ReLU not fixed - sbt.setReluStatus( 1, 0, ReluConstraint::PHASE_NOT_FIXED ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run( false ) ); // Test with costant concretization - - // Expected range: [-11, -5], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < -11 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > -11 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > 7 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < 7 + 0.001 ); - - /// Case 2: ReLU fixed to ACTIVE - sbt.setReluStatus( 1, 0, ReluConstraint::PHASE_ACTIVE ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run( false ) ); // Test with costant concretization - - // Expected range: [-11, -5], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < -9 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > -9 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > 1 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < 1 + 0.001 ); - - /// Case 2: ReLU fixed to INACTIVE - sbt.setReluStatus( 1, 0, ReluConstraint::PHASE_INACTIVE ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt.run( false ) ); // Test with costant concretization - - // Expected range: [-11, -5], +- epsilon - TS_ASSERT( sbt.getLowerBound( 2, 0 ) < -11 ); - TS_ASSERT( sbt.getLowerBound( 2, 0 ) > -11 - 0.001 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) > -5 ); - TS_ASSERT( sbt.getUpperBound( 2, 0 ) < -5 + 0.001 ); - } - - void test_duplicate() - { - SymbolicBoundTightener sbt; - - sbt.setNumberOfLayers( 3 ); - sbt.setLayerSize( 0, 2 ); - sbt.setLayerSize( 1, 2 ); - sbt.setLayerSize( 2, 1 ); - - sbt.allocateWeightAndBiasSpace(); - - // All biases are 0 - sbt.setBias( 0, 0, 0 ); - sbt.setBias( 0, 1, 0 ); - sbt.setBias( 1, 0, -30 ); // Strong negative bias for node (1,0) - sbt.setBias( 1, 1, 0 ); - sbt.setBias( 2, 0, 0 ); - - // Weights - sbt.setWeight( 0, 0, 0, 2 ); - sbt.setWeight( 0, 0, 1, 1 ); - sbt.setWeight( 0, 1, 0, 3 ); - sbt.setWeight( 0, 1, 1, 1 ); - sbt.setWeight( 1, 0, 0, 1 ); - sbt.setWeight( 1, 1, 0, -1 ); - - // Initial bounds - sbt.setInputLowerBound( 0, 4 ); - sbt.setInputUpperBound( 0, 6 ); - sbt.setInputLowerBound( 1, 1 ); - sbt.setInputUpperBound( 1, 5 ); - - SymbolicBoundTightener sbt2; - TS_ASSERT_THROWS_NOTHING( 
sbt.storeIntoOther( sbt2 ) ); - - // Run the tightener - TS_ASSERT_THROWS_NOTHING( sbt2.run( false ) ); // Test with costant concretization - - // Expected range: [-11, -5], +- epsilon - TS_ASSERT( sbt2.getLowerBound( 2, 0 ) < -11 ); - TS_ASSERT( sbt2.getLowerBound( 2, 0 ) > -11 - 0.001 ); - TS_ASSERT( sbt2.getUpperBound( 2, 0 ) > -5 ); - TS_ASSERT( sbt2.getUpperBound( 2, 0 ) < -5 + 0.001 ); - } - - void test_todo() - { - TS_TRACE( "TODO: add a test for linear concretizations" ); - } -}; - -// -// Local Variables: -// compile-command: "make -C ../../.. " -// tags-file-name: "../../../TAGS" -// c-basic-offset: 4 -// End: -// diff --git a/src/input_parsers/AcasParser.cpp b/src/input_parsers/AcasParser.cpp index 613deed68..bab695159 100644 --- a/src/input_parsers/AcasParser.cpp +++ b/src/input_parsers/AcasParser.cpp @@ -230,74 +230,12 @@ void AcasParser::generateQuery( InputQuery &inputQuery ) for ( unsigned i = 0; i < _acasNeuralNetwork.getLayerSize( numberOfLayers - 1 ); ++i ) { - unsigned var = _nodeToB[NodeIndex( 0, i )]; + unsigned var = _nodeToB[NodeIndex( numberOfLayers - 1, i )]; nlr->setWeightedSumVariable( numberOfLayers - 1, i, var ); } // Store the reasoner in the input query inputQuery.setNetworkLevelReasoner( nlr ); - - // TODO: remove the below, once SBT is merged into NLR - if ( GlobalConfiguration::USE_SYMBOLIC_BOUND_TIGHTENING ) - { - // Prepare the symbolic bound tightener - SymbolicBoundTightener *sbt = new SymbolicBoundTightener; - - sbt->setNumberOfLayers( numberOfLayers ); - - for ( unsigned i = 0; i < numberOfLayers; ++i ) - sbt->setLayerSize( i, _acasNeuralNetwork.getLayerSize( i ) ); - - sbt->allocateWeightAndBiasSpace(); - - // Biases - for ( unsigned i = 1; i < numberOfLayers; ++i ) - for ( unsigned j = 0; j < _acasNeuralNetwork.getLayerSize( i ); ++j ) - sbt->setBias( i, j, _acasNeuralNetwork.getBias( i, j ) ); - - // Weights - for ( unsigned layer = 0; layer < numberOfLayers - 1; ++layer ) - { - unsigned targetLayerSize = _acasNeuralNetwork.getLayerSize( layer + 1 ); - for ( unsigned target = 0; target < targetLayerSize; ++target ) - { - for ( unsigned source = 0; source < _acasNeuralNetwork.getLayerSize( layer ); ++source ) - sbt->setWeight( layer, source, target, _acasNeuralNetwork.getWeight( layer, source, target ) ); - } - } - - // Initial bounds - for ( unsigned i = 0; i < inputLayerSize; ++i ) - { - double min, max; - _acasNeuralNetwork.getInputRange( i, min, max ); - - sbt->setInputLowerBound( i, min ); - sbt->setInputUpperBound( i, max ); - } - - // Variable indexing - for ( unsigned i = 1; i < numberOfLayers - 1; ++i ) - { - unsigned layerSize = _acasNeuralNetwork.getLayerSize( i ); - - for ( unsigned j = 0; j < layerSize; ++j ) - { - unsigned b = _nodeToB[NodeIndex( i, j )]; - sbt->setReluBVariable( i, j, b ); - - unsigned f = _nodeToF[NodeIndex( i, j )]; - sbt->setReluFVariable( i, j, f ); - } - } - - for ( unsigned i = 0; i < outputLayerSize; ++i ) - { - sbt->setReluFVariable( numberOfLayers - 1, i, _nodeToB[NodeIndex( numberOfLayers - 1, i )] ); - } - - inputQuery.setSymbolicBoundTightener( sbt ); - } } unsigned AcasParser::getNumInputVaribales() const diff --git a/src/query_loader/tests/Test_QueryLoader.h b/src/query_loader/tests/Test_QueryLoader.h index dac39f06d..49b14dc2f 100644 --- a/src/query_loader/tests/Test_QueryLoader.h +++ b/src/query_loader/tests/Test_QueryLoader.h @@ -20,6 +20,7 @@ #include "InputQuery.h" #include "MockFileFactory.h" #include "QueryLoader.h" +#include "ReluConstraint.h" #include "T/unistd.h" const String QUERY_TEST_FILE( 
"QueryTest.txt" );