diff --git a/conftest.py b/conftest.py index 70a3ccebec5..df5b0f31e59 100644 --- a/conftest.py +++ b/conftest.py @@ -11,6 +11,7 @@ import pytest + def pytest_runtest_setup(item): """ This method overrides pytest's default behavior for marked tests. @@ -45,8 +46,9 @@ def pytest_runtest_setup(item): elif markeroption: return elif item_markers: - if (not set(implicit_markers).issubset(item_markers) - and not item_markers.issubset(set(extended_implicit_markers))): + if not set(implicit_markers).issubset( + item_markers + ) and not item_markers.issubset(set(extended_implicit_markers)): pytest.skip('SKIPPED: Only running default, solver, and unmarked tests.') diff --git a/doc/OnlineDocs/contributed_packages/doe/doe.rst b/doc/OnlineDocs/contributed_packages/doe/doe.rst index 710bc939af9..e3c059ab078 100644 --- a/doc/OnlineDocs/contributed_packages/doe/doe.rst +++ b/doc/OnlineDocs/contributed_packages/doe/doe.rst @@ -1,16 +1,23 @@ Pyomo.DoE ========= +.. figure:: ../../../logos/doe/PyomoDoE-md.png + :scale: 25 % + :align: right + **Pyomo.DoE** (Pyomo Design of Experiments) is a Python library for model-based design of experiments using science-based models. -Pyomo.DoE was developed by **Jialu Wang** and **Alexander W. Dowling** at the University of Notre Dame as part of the `Carbon Capture Simulation for Industry Impact (CCSI2) `_. -project, funded through the U.S. Department Of Energy Office of Fossil Energy. + + +Pyomo.DoE was developed by **Jialu Wang** and **Alexander Dowling** at the University of Notre Dame as part of the `Carbon Capture Simulation for Industry Impact (CCSI2) `_ +project, funded through the U.S. Department of Energy Office of Fossil Energy and Carbon Management. Special thank you to John Siirola and Bethany Nicholson for extensive code reviews, suggestions, and improvements to Pyomo.DoE. If you use Pyomo.DoE, please cite: -[Wang and Dowling, 2022] Wang, Jialu, and Alexander W. Dowling. -"Pyomo. 
DOE: An open‐source package for model‐based design of experiments in Python." -AIChE Journal 68.12 (2022): e17813. `https://doi.org/10.1002/aic.17813` +Jialu Wang and Alexander W. Dowling (2022). "Pyomo.DoE: An open‐source package for model‐based design of experiments in Python." +*AIChE Journal* 68(12), e17813. `doi: 10.1002/aic.17813 `_ + + Methodology Overview --------------------- @@ -18,27 +25,27 @@ Methodology Overview Model-based Design of Experiments (MBDoE) is a technique to maximize the information gain of experiments by directly using science-based models with physically meaningful parameters. It is one key component in the model calibration and uncertainty quantification workflow shown below: .. figure:: flowchart.png - :scale: 25 % + :width: 99 % + :align: center - The exploratory analysis, parameter estimation, uncertainty analysis, and MBDoE are combined into an iterative framework to select, refine, and calibrate science-based mathematical models with quantified uncertainty. Currently, Pyomo.DoE focused on increasing parameter precision. + Pyomo.DoE integrates exploratory analysis, parameter estimation, uncertainty analysis, and MBDoE into an iterative framework to select, refine, and calibrate science-based mathematical models with quantified uncertainty. Currently, Pyomo.DoE focuses on increasing parameter precision. -Pyomo.DoE provides the exploratory analysis and MBDoE capabilities to the Pyomo ecosystem. The user provides one Pyomo model, a set of parameter nominal values, -the allowable design spaces for design variables, and the assumed observation error model. -During exploratory analysis, Pyomo.DoE checks if the model parameters can be inferred from the postulated measurements or preliminary data. -MBDoE then recommends optimized experimental conditions for collecting more data. 
-Parameter estimation packages such as `Parmest `_ can perform parameter estimation using the available data to infer values for parameters, -and facilitate an uncertainty analysis to approximate the parameter covariance matrix. +Pyomo.DoE provides science-based MBDoE capabilities to the Pyomo ecosystem. The user provides one Pyomo model, a set of parameter nominal values, +the allowable design spaces for design variables, and the assumed observation error structure (e.g., covariance matrix). +During exploratory analysis, Pyomo.DoE checks if the model parameters can be inferred from the proposed measurements or preliminary data. +Pyomo.DoE then recommends optimized experimental conditions for collecting more data. +Parameter estimation via `Parmest `_ can estimate uncertainty parameters from data and compute a parameter covariance matrix. If the parameter uncertainties are sufficiently small, the workflow terminates and returns the final model with quantified parametric uncertainty. -If not, MBDoE recommends optimized experimental conditions to generate new data. +Otherwise, Pyomo.DoE recommends the best next set of experimental conditions to generate new data. Below is an overview of the type of optimization models Pyomo.DoE can accomodate: * Pyomo.DoE is suitable for optimization models of **continuous** variables * Pyomo.DoE can handle **equality constraints** defining state variables -* Pyomo.DoE supports (Partial) Differential-Algebraic Equations (PDAE) models via Pyomo.DAE +* Pyomo.DoE supports (Partial) Differential-Algebraic Equations (PDAE) models via `Pyomo.DAE `_ * Pyomo.DoE also supports models with only algebraic constraints -The general form of a DAE problem that can be passed into Pyomo.DoE is shown below: +Pyomo.DoE considers the following DAE model: .. math:: \begin{align*} @@ -64,14 +71,14 @@ where: * :math:`\mathbf{t} \in \mathbb{R}^{N_t \times 1}` is a union of all time sets. .. 
note:: - * Process models provided to Pyomo.DoE should define an extra scenario index for all state variables and all parameters, as the first index before any other index. - * Process models must include an index for time, named ``t``. For steady-state models, t should be [0]. - * Measurements can have an extra index besides time. - * Parameters and design variables should be defined as Pyomo ``var`` components on the model to use ``direct_kaug`` mode, and can be defined as Pyomo ``Param`` object if not using ``direct_kaug``. + * Process models provided to Pyomo.DoE should define an extra scenario index for all state variables and all parameters, as the first index before any other index. The next version of Pyomo.DoE will remove this requirement. + * Process models must include an index for time, named ``t``. For steady-state models, ``t`` should be ``[0]``. + * Measurements can have an extra index (e.g., spatial domain) besides time. + * Parameters and design variables should be defined as Pyomo ``var`` components on the model to use ``direct_kaug`` mode. Other modes allow these to be defined as Pyomo ``Param`` objects. * Create model function should take scenarios as the first argument of this function. * Design variables are defined with and only with a time index. -Based on the above notation, the form of the MBDoE problem addressed in Pyomo.DoE is shown below: +Pyomo.DoE solves the following DAE-constrained optimization problem: .. math:: \begin{equation} @@ -89,13 +96,13 @@ Based on the above notation, the form of the MBDoE problem addressed in Pyomo.Do where: -* :math:`\boldsymbol{\varphi}` are design variables, which are manipulated to maximize the information content of experiments. It should consist of one or more of :math:`\mathbf{u}(t), \mathbf{y}^{\mathbf{0}}({t_0}),\overline{\mathbf{w}}`. With a proper model formulation, the timepoints for control or measurements :math:`\mathbf{t}` can also be degrees of freedom. 
+* :math:`\boldsymbol{\varphi}` are design variables, which are manipulated to maximize the information content of experiments. :math:`\boldsymbol{\varphi}` should consist of one or more of :math:`\mathbf{u}(t), \mathbf{y}^{\mathbf{0}}({t_0}),\overline{\mathbf{w}}`. With a proper model formulation, the timepoints for control or measurements :math:`\mathbf{t}` can also be degrees of freedom. * :math:`\mathbf{M}` is the Fisher information matrix (FIM), estimated as the inverse of the covariance matrix of parameter estimates :math:`\boldsymbol{\hat{\theta}}`. A large FIM indicates more information contained in the experiment for parameter estimation. * :math:`\mathbf{Q}` is the dynamic sensitivity matrix, containing the partial derivatives of :math:`\mathbf{y}` with respect to :math:`\boldsymbol{\theta}`. -* :math:`\Psi` is the design criteria to measure FIM. +* :math:`\Psi(\cdot)` is the design criteria computed from the FIM. * :math:`\mathbf{V}_{\boldsymbol{\theta}}(\boldsymbol{\hat{\theta}})^{-1}` is the FIM of previous experiments. -Pyomo.DoE provides four design criteria :math:`\Psi` to measure the size of FIM: +Pyomo.DoE provides four design criteria :math:`\Psi(\cdot)` to measure the size of FIM: .. list-table:: Pyomo.DoE design criteria :header-rows: 1 @@ -117,26 +124,26 @@ Pyomo.DoE provides four design criteria :math:`\Psi` to measure the size of FIM - :math:`\text{cond}({\mathbf{M}})` - Ratio of the longest axis to the shortest axis of the confidence ellipse -In order to solve problems of the above, Pyomo.DoE implements the 2-stage stochastic program. Please see Wang and Dowling (2022) for details. +Pyomo.DoE reformulates the above optimization problem as a stochastic program. The scenarios are perturbations of the model parameters :math:`\boldsymbol{\theta}` to compute the sensitivities :math:`\mathbf{Q}` via finite difference. See `Wang and Dowling (2022) `_ for details. 
Pyomo.DoE Required Inputs -------------------------------- -The required inputs to the Pyomo.DoE solver are the following: +Pyomo.DoE requires the following inputs: -* A function that creates the process model -* Dictionary of parameters and their nominal value +* A Python function that creates the process model. This is similar to interface for `Parmest `_. +* Dictionary of parameters and their nominal values * Dictionary of measurements and their measurement time points * Dictionary of design variables and their control time points -* A Numpy ``array`` containing the Prior FIM -* Local and global optimization solver +* A Numpy ``array`` containing the prior FIM +* Local and global nonlinear optimization solver object Below is a list of arguments that Pyomo.DoE expects the user to provide. param_init : ``dictionary`` - A ``dictionary`` of parameter names and values. If they are an indexed variable, put the variable name and index in a nested ``Dictionary``. + A ``dictionary`` of parameter names and values. If they are indexed variables, put the variable names and indexes in a nested ``Dictionary``. design_variable_timepoints : ``dictionary`` - A ``dictionary`` of design variable names and its control time points. If this design var is independent of time (constant), set the time to [0] + A ``dictionary`` of design variable names and their control time points. If the design variables are time-invariant (constant), set the time to ``[0]`` measurement_object : ``object`` An ``object`` of the measurements, provided by the measurement class. @@ -162,8 +169,8 @@ Pyomo.DoE Solver Interface .. Note:: ``stochastic_program()`` includes the following steps: #. Build two-stage stochastic programming optimization model where scenarios correspond to finite difference approximations for the Jacobian of the response variables with respect to calibrated model parameters - #. 
Fix the experiment design decisions and solve a square (i.e., zero degrees of freedom) instance of the two-stage DOE problem. This step is for initialization. - #. Unfix the experiment design decisions and solve the two-stage DOE problem. + #. Fix the experiment design decisions and solve a square (i.e., zero degrees of freedom) instance of the two-stage DoE problem. This step is for initialization. + #. Unfix the experiment design decisions and solve the two-stage DoE problem. .. autoclass:: pyomo.contrib.doe.measurements.Measurements :members: __init__, check_subset @@ -171,10 +178,10 @@ Pyomo.DoE Solver Interface .. autoclass:: pyomo.contrib.doe.scenario.Scenario_generator :special-members: __init__ -.. autoclass:: pyomo.contrib.doe.result.FIM_result +.. autoclass:: pyomo.contrib.doe.result.FisherResults :special-members: __init__, calculate_FIM -.. autoclass:: pyomo.contrib.doe.result.Grid_Search_Result +.. autoclass:: pyomo.contrib.doe.result.GridSearchResult :special-members: __init__ @@ -182,8 +189,8 @@ Pyomo.DoE Solver Interface Pyomo.DoE Usage Example ------------------------ -We illustrate the use of Pyomo.DoE using a reaction kinetics example (Wang and Dowling, 2022). -The Arrhenius equations model the temperature dependence of the reaction rate coefficient :math:`k_1, k_2`. Assuming a first-order reaction mechanism gives the reaction rate model. Further, we assume only species A is fed to the reactor. +We illustrate the Pyomo.DoE interface with a reaction kinetics example when feed A is converted to species B and C (Wang and Dowling, 2022). +Assuming an Arrhenius temperature dependence for the reaction rates :math:`k_1, k_2`, first-order reaction mechanisms, and only species A is fed to the reactor gives the following DAE model: .. math:: @@ -201,11 +208,12 @@ The Arrhenius equations model the temperature dependence of the reaction rate co -:math:`C_A(t), C_B(t), C_C(t)` are the time-varying concentrations of the species A, B, C, respectively. 
-:math:`k_1, k_2` are the rates for the two chemical reactions using an Arrhenius equation with activation energies :math:`E_1, E_2` and pre-exponential factors :math:`A_1, A_2`. +Here :math:`C_A(t), C_B(t), C_C(t)` are the time-varying concentrations of the species A, B, C, respectively. The reaction rates +:math:`k_1, k_2` depend on the activation energies :math:`E_1, E_2` and pre-exponential factors :math:`A_1, A_2`. The goal of MBDoE is to optimize the experiment design variables :math:`\boldsymbol{\varphi} = (C_{A0}, T(t))`, where :math:`C_{A0},T(t)` are the initial concentration of species A and the time-varying reactor temperature, to maximize the precision of unknown model parameters :math:`\boldsymbol{\theta} = (A_1, E_1, A_2, E_2)` by measuring :math:`\mathbf{y}(t)=(C_A(t), C_B(t), C_C(t))`. -The observation errors are assumed to be independent both in time and across measurements with a constant standard deviation of 1 M for each species. +The observation errors are assumed to be independent in both time and across measurements with a constant standard deviation of 1 M for each species. Thus the measurement covariance matrix is the identity matrix. +See this `Jupyter notebook `_ for an extended version of this example. Step 0: Import Pyomo and the Pyomo.DoE module ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -280,6 +288,9 @@ The process model for the reaction kinetics problem is shown below. ... m.C[z,'CB',0.0].fix(0.0) ... m.C[z,'CC',0.0].fix(0.0) ... return m + +Next we define a function to discretize the model. + .. doctest:: >>> # === Discretization === @@ -292,7 +303,7 @@ The process model for the reaction kinetics problem is shown below. The first argument of the ``create_model`` function should be ``scena``. .. note:: - The model parameters ( :math:`A_1, A_2, E_1, E_2`) are defined as either ``Var`` or ``Param`` objects. This is suggested since it allows an easy transition when changing mode to ``direct_kaug``. 
+ To use ``direct_kaug`` mode, the model parameters ( :math:`A_1, A_2, E_1, E_2`) definitions should be changed from ``Param`` to ``Var`` objects. Step 2: Define the inputs for Pyomo.DoE ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -322,7 +333,7 @@ Step 3: Compute the FIM of a square MBDoE problem This method computes an MBDoE optimization problem with no degree of freedom. This method can be accomplished by two modes, ``direct_kaug`` and ``sequential_finite``. -``direct_kaug`` mode requires the installation of the solver `k_aug `_. +``direct_kaug`` mode requires the installation of the solver `k_aug `_ which is available through the `IDAES-PSE extensions `_. .. doctest:: @@ -330,7 +341,7 @@ This method can be accomplished by two modes, ``direct_kaug`` and ``sequential_f >>> sensi_opt = 'sequential_finite' >>> # === Specify an experiment === >>> exp1 = {'CA0': {0: 5}, 'T': {0: 570, 0.125:300, 0.25:300, 0.375:300, 0.5:300, 0.625:300, 0.75:300, 0.875:300, 1:300}} - >>> # === Create the DOE object === + >>> # === Create the DoE object === >>> doe_object = DesignOfExperiments(parameter_dict, dv_pass, measure_class, create_model, ... prior_FIM=prior_none, discretize_model=disc) >>> # === Use ``compute_FIM`` to compute one MBDoE square problem === @@ -352,8 +363,7 @@ Step 4: Exploratory analysis (Enumeration) Exploratory analysis is suggested to enumerate the design space to check if the problem is identifiable, i.e., ensure that D-, E-optimality metrics are not small numbers near zero, and Modified E-optimality is not a big number. Pyomo.DoE accomplishes the exploratory analysis with the ``run_grid_search`` function. -It allows users to define any number of design decisions. Heatmaps can be drawn by two design variables, fixing other design variables. -1D curve can be drawn by one design variable, fixing all other variables. +It allows users to define any number of design decisions. 
Heatmaps (or lines) are used to visualize the sensitivity of the DoE criteria to changes in two (or one) design variables with any other design variables held constant. The function ``run_grid_search`` enumerates over the design space, each MBDoE problem accomplished by ``compute_FIM`` method. Therefore, ``run_grid_search`` supports only two modes: ``sequential_finite`` and ``direct_kaug``. @@ -388,20 +398,20 @@ Successful run of the above code shows the following figure: .. figure:: grid-1.png :scale: 35 % -A heatmap shows the change of the objective function, a.k.a. the experimental information content, in the design region. Horizontal and vertical axes are two design variables, while the color of each grid shows the experimental information content. Taking the Fig. Reactor case - A optimality as example, A-optimality shows that the most informative region is around $C_{A0}=5.0$ M, $T=300.0$ K, while the least informative region is around $C_{A0}=1.0$ M, $T=700.0$ K. +This heatmap shows the sensitivity of the DoE criteria, i.e., measures of the experimental information content, across the two-dimensional experiment design space. Horizontal and vertical axes are two design variables, while the color of each grid shows the experimental information content. For example, A-optimality shows that the most informative region is around :math:`C_{A0}=5.0` M, :math:`T=300.0` K, while the least informative region is around :math:`C_{A0}=1.0` M, :math:`T=700.0` K. Step 5: Gradient-based optimization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Pyomo.DoE accomplishes gradient-based optimization with the ``stochastic_program`` function for A- and D-optimality design. +Pyomo.DoE formulates a two-stage stochastic_program to compute A- and D-optimality designs. -This function solves twice: It solves the square version of the MBDoE problem first, and then unfixes the design variables as degree of freedoms and solves again. In this way the optimization problem can be well initialized. 
+This function solves twice to ensure reliable initialization: first, Pyomo.DoE solves a square problem. Next, Pyomo.DoE unfixes the design variables (adds degrees of freedom) and solves again. .. doctest:: >>> # === Specify a starting point === >>> exp1 = {'CA0': {0: 5}, 'T': {0: 300, 0.125:300, 0.25:300, 0.375:300, 0.5:300, 0.625:300, 0.75:300, 0.875:300, 1:300}} - >>> # === Define DOE object === + >>> # === Define DoE object === >>> doe_object = DesignOfExperiments(parameter_dict, dv_pass, measure_class, createmod, ... prior_FIM=prior_pass, discretize_model=disc) # doctest: +SKIP >>> # === Optimize === diff --git a/examples/pyomo/p-median/solver1.py b/examples/pyomo/p-median/solver1.py index 39220cdd70b..d4fbda0c81e 100644 --- a/examples/pyomo/p-median/solver1.py +++ b/examples/pyomo/p-median/solver1.py @@ -11,7 +11,7 @@ # Imports from Pyomo from pyomo.core import * -from pyomo.common.plugin import * +from pyomo.common.plugin_base import * from pyomo.opt import * diff --git a/examples/pyomo/p-median/solver2.py b/examples/pyomo/p-median/solver2.py index 537fca49428..c7215183481 100644 --- a/examples/pyomo/p-median/solver2.py +++ b/examples/pyomo/p-median/solver2.py @@ -11,7 +11,7 @@ # Imports from Pyomo from pyomo.core import * -from pyomo.common.plugin import * +from pyomo.common.plugin_base import * from pyomo.opt import * import random import copy diff --git a/pyomo/checker/checker.py b/pyomo/checker/checker.py index 4e9b4ed8990..6dfab294eca 100644 --- a/pyomo/checker/checker.py +++ b/pyomo/checker/checker.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -from pyomo.common.plugin import Interface +from pyomo.common.plugin_base import Interface class IModelChecker(Interface): diff --git a/pyomo/checker/hooks.py b/pyomo/checker/hooks.py index 4f287185a64..530a38c938d 100644 --- a/pyomo/checker/hooks.py +++ b/pyomo/checker/hooks.py @@ -11,7 +11,7 @@ __all__ = ['IPreCheckHook', 'IPostCheckHook'] -from pyomo.common.plugin import Interface +from pyomo.common.plugin_base import Interface class IPreCheckHook(Interface): diff --git a/pyomo/checker/plugins/checker.py b/pyomo/checker/plugins/checker.py index 9df7e1d8bda..171b1906592 100644 --- a/pyomo/checker/plugins/checker.py +++ b/pyomo/checker/plugins/checker.py @@ -13,7 +13,7 @@ import re import textwrap -from pyomo.common.plugin import SingletonPlugin, implements, ExtensionPoint +from pyomo.common.plugin_base import SingletonPlugin, implements, ExtensionPoint from pyomo.checker import IModelChecker, IPreCheckHook, IPostCheckHook diff --git a/pyomo/checker/plugins/checkers/model/assignment.py b/pyomo/checker/plugins/checkers/model/assignment.py index 29f8ce5bf75..7d1cbe63608 100644 --- a/pyomo/checker/plugins/checkers/model/assignment.py +++ b/pyomo/checker/plugins/checkers/model/assignment.py @@ -10,7 +10,7 @@ # ___________________________________________________________________________ import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.checker import IterativeTreeChecker from pyomo.checker.plugins.model import ModelTrackerHook @@ -18,7 +18,7 @@ class ArrayValue(IterativeTreeChecker, ModelTrackerHook): - pyomo.common.plugin.alias('model.array_value', 'Check if assigning a value to an array of variables') + pyomo.common.plugin_base.alias('model.array_value', 'Check if assigning a value to an array of variables') varArrays = {} diff --git a/pyomo/checker/plugins/checkers/model/conditional.py b/pyomo/checker/plugins/checkers/model/conditional.py 
index ff5db5a36f2..59ae9fbf398 100644 --- a/pyomo/checker/plugins/checkers/model/conditional.py +++ b/pyomo/checker/plugins/checkers/model/conditional.py @@ -10,7 +10,7 @@ # ___________________________________________________________________________ import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.model import ModelTrackerHook from pyomo.checker.plugins.checkers.model._rulebase import _ModelRuleChecker @@ -18,7 +18,7 @@ class ModelValue(_ModelRuleChecker, ModelTrackerHook): - pyomo.common.plugin.alias('model.value', 'Check if comparisons are done using the "value()" function.') + pyomo.common.plugin_base.alias('model.value', 'Check if comparisons are done using the "value()" function.') def checkerDoc(self): return """\ diff --git a/pyomo/checker/plugins/checkers/model/imports.py b/pyomo/checker/plugins/checkers/model/imports.py index b1bd2c68cf3..2e080514e1f 100644 --- a/pyomo/checker/plugins/checkers/model/imports.py +++ b/pyomo/checker/plugins/checkers/model/imports.py @@ -10,7 +10,7 @@ # ___________________________________________________________________________ import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.checker import IterativeTreeChecker @@ -21,7 +21,7 @@ class Imports(IterativeTreeChecker): exists somewhere within the initial imports block """ - pyomo.common.plugin.alias('model.imports', 'Check if pyomo.core or pyomo.environ has been imported.') + pyomo.common.plugin_base.alias('model.imports', 'Check if pyomo.core or pyomo.environ has been imported.') def beginChecking(self, runner, script): self.pyomoImported = False diff --git a/pyomo/checker/plugins/checkers/model/model.py b/pyomo/checker/plugins/checkers/model/model.py index 0637e51fefd..6d999057473 100644 --- a/pyomo/checker/plugins/checkers/model/model.py +++ b/pyomo/checker/plugins/checkers/model/model.py @@ -10,14 +10,14 @@ # 
___________________________________________________________________________ import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.checker import IterativeTreeChecker class ModelName(IterativeTreeChecker): - pyomo.common.plugin.alias('model.model_name', 'Check that the "model" variable is assigned with a Pyomo model.') + pyomo.common.plugin_base.alias('model.model_name', 'Check that the "model" variable is assigned with a Pyomo model.') def beginChecking(self, runner, script): self.modelAssigned = False @@ -50,7 +50,7 @@ def check(self, runner, script, info): class ModelCreate(IterativeTreeChecker): - pyomo.common.plugin.alias('model.create', 'Check if a Pyomo model class is being assigned to a variable.') + pyomo.common.plugin_base.alias('model.create', 'Check if a Pyomo model class is being assigned to a variable.') def getTargetStrings(self, assign): ls = [] @@ -79,7 +79,7 @@ def check(self, runner, script, info): class DeprecatedModel(IterativeTreeChecker): - pyomo.common.plugin.alias('model.Model_class', 'Check if the deprecated Model class is being used.') + pyomo.common.plugin_base.alias('model.Model_class', 'Check if the deprecated Model class is being used.') def checkerDoc(self): return """\ diff --git a/pyomo/checker/plugins/checkers/model/rule.py b/pyomo/checker/plugins/checkers/model/rule.py index 60e8eb8b681..df190fb3d94 100644 --- a/pyomo/checker/plugins/checkers/model/rule.py +++ b/pyomo/checker/plugins/checkers/model/rule.py @@ -11,7 +11,7 @@ import sys import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.model import ModelTrackerHook from pyomo.checker.plugins.checker import IterativeTreeChecker @@ -31,7 +31,7 @@ def arg_name(arg): class ModelShadowing(IterativeTreeChecker, ModelTrackerHook): - pyomo.common.plugin.alias('model.rule.shadowing', 'Ignoring for now') + pyomo.common.plugin_base.alias('model.rule.shadowing', 'Ignoring for now') def 
checkerDoc(self): return """\ @@ -50,7 +50,7 @@ def check(self, runner, script, info): class ModelAccess(IterativeTreeChecker, ModelTrackerHook): - pyomo.common.plugin.alias('model.rule.model_access', 'Check that a rule does not reference a global model instance.') + pyomo.common.plugin_base.alias('model.rule.model_access', 'Check that a rule does not reference a global model instance.') def checkerDoc(self): return """\ @@ -74,7 +74,7 @@ def check(self, runner, script, info): class ModelArgument(_ModelRuleChecker): - pyomo.common.plugin.alias('model.rule.model_argument', 'Check that the model instance is the first argument for a rule.') + pyomo.common.plugin_base.alias('model.rule.model_argument', 'Check that the model instance is the first argument for a rule.') def checkerDoc(self): return """\ @@ -96,7 +96,7 @@ def checkBody(self, funcdef): class NoneReturn(_ModelRuleChecker): - pyomo.common.plugin.alias('model.rule.none_return', 'Check that a rule does not return the value None.') + pyomo.common.plugin_base.alias('model.rule.none_return', 'Check that a rule does not return the value None.') def checkerDoc(self): return """\ diff --git a/pyomo/checker/plugins/checkers/py3k/printing.py b/pyomo/checker/plugins/checkers/py3k/printing.py index 6e3a86c84de..e7c8ff59297 100644 --- a/pyomo/checker/plugins/checkers/py3k/printing.py +++ b/pyomo/checker/plugins/checkers/py3k/printing.py @@ -10,13 +10,13 @@ # ___________________________________________________________________________ import re -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.checker import IterativeDataChecker class PrintParens(IterativeDataChecker): - pyomo.common.plugin.alias('py3k.print_parens', 'Check if print statements have parentheses.') + pyomo.common.plugin_base.alias('py3k.print_parens', 'Check if print statements have parentheses.') def __init__(self): self.current_lineno = 0 diff --git a/pyomo/checker/plugins/checkers/py3k/range.py 
b/pyomo/checker/plugins/checkers/py3k/range.py index a2c48444322..90377ff75c7 100644 --- a/pyomo/checker/plugins/checkers/py3k/range.py +++ b/pyomo/checker/plugins/checkers/py3k/range.py @@ -10,13 +10,13 @@ # ___________________________________________________________________________ import ast -import pyomo.common.plugin +import pyomo.common.plugin_base from pyomo.checker.plugins.checker import IterativeTreeChecker class XRange(IterativeTreeChecker): - pyomo.common.plugin.alias('py3k.xrange', 'Check if the xrange() function is used.') + pyomo.common.plugin_base.alias('py3k.xrange', 'Check if the xrange() function is used.') def check(self, runner, script, info): if isinstance(info, ast.Name): diff --git a/pyomo/checker/plugins/function.py b/pyomo/checker/plugins/function.py index 0099276af6f..035b66f2604 100644 --- a/pyomo/checker/plugins/function.py +++ b/pyomo/checker/plugins/function.py @@ -11,7 +11,7 @@ import ast -from pyomo.common.plugin import SingletonPlugin, implements +from pyomo.common.plugin_base import SingletonPlugin, implements from pyomo.checker.hooks import IPreCheckHook, IPostCheckHook diff --git a/pyomo/checker/plugins/model.py b/pyomo/checker/plugins/model.py index 48421b085e9..2801b60f54d 100644 --- a/pyomo/checker/plugins/model.py +++ b/pyomo/checker/plugins/model.py @@ -11,7 +11,7 @@ import ast -from pyomo.common.plugin import SingletonPlugin, implements +from pyomo.common.plugin_base import SingletonPlugin, implements from pyomo.checker.hooks import IPreCheckHook diff --git a/pyomo/checker/runner.py b/pyomo/checker/runner.py index ba49cf44463..4b00ef9546a 100644 --- a/pyomo/checker/runner.py +++ b/pyomo/checker/runner.py @@ -11,7 +11,7 @@ import ast -from pyomo.common.plugin import ExtensionPoint +from pyomo.common.plugin_base import ExtensionPoint from pyomo.checker.checker import IModelChecker from pyomo.checker.script import ModelScript diff --git a/pyomo/common/getGSL.py b/pyomo/common/getGSL.py index cfeeb7c242d..130eecba284 100644 --- 
a/pyomo/common/getGSL.py +++ b/pyomo/common/getGSL.py @@ -9,53 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import logging -import os -import platform -import sys -from pyomo.common import Library -from pyomo.common.download import FileDownloader +from pyomo.common.deprecation import (relocated_module) -logger = logging.getLogger('pyomo.common') - -# These URLs were retrieved from -# https://ampl.com/resources/extended-function-library/ -urlmap = { - 'linux': 'https://ampl.com/dl/open/amplgsl/amplgsl-linux%s.zip', - 'windows': 'https://ampl.com/dl/open/amplgsl/amplgsl-win%s.zip', - 'cygwin': 'https://ampl.com/dl/open/amplgsl/amplgsl-win%s.zip', - 'darwin': 'https://ampl.com/dl/open/amplgsl/amplgsl-osx.zip' -} - -def find_GSL(): - # FIXME: the GSL interface is currently broken in PyPy: - if platform.python_implementation().lower().startswith('pypy'): - return None - return Library('amplgsl.dll').path() - -def get_gsl(downloader): - system, bits = downloader.get_sysinfo() - url = downloader.get_platform_url(urlmap) - if '%s' in url: - url = url % (bits,) - - downloader.set_destination_filename(os.path.join('lib', 'amplgsl.dll')) - - logger.info("Fetching GSL from %s and installing it to %s" - % (url, downloader.destination())) - - downloader.get_binary_file_from_zip_archive(url, 'amplgsl.dll') - -def main(argv): - downloader = FileDownloader() - downloader.parse_args(argv) - get_gsl(downloader) - -if __name__ == '__main__': - logger.setLevel(logging.INFO) - try: - main(sys.argv[1:]) - except Exception as e: - print(e.message or str(e)) - print("Usage: %s [--insecure] [target]" % os.path.basename(sys.argv[0])) - sys.exit(1) +relocated_module('pyomo.common.gsl', version='TBD') diff --git a/pyomo/common/gsl.py b/pyomo/common/gsl.py new file mode 100644 index 00000000000..409a0a00f37 --- /dev/null +++ b/pyomo/common/gsl.py @@ -0,0 +1,32 @@ +# 
___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging +import platform +from pyomo.common import Library +from pyomo.common.deprecation import deprecated + +logger = logging.getLogger('pyomo.common') + +@deprecated( + "Use of get_gsl is deprecated and NO LONGER FUNCTIONS as of February 9, " + "2023. ", + version='TBD') +def get_gsl(downloader): + logger.info("As of February 9, 2023, AMPL GSL can no longer be downloaded\ + through download-extensions. Visit https://portal.ampl.com/\ + to download the AMPL GSL binaries.") + +def find_GSL(): + # FIXME: the GSL interface is currently broken in PyPy: + if platform.python_implementation().lower().startswith('pypy'): + return None + return Library('amplgsl.dll').path() diff --git a/pyomo/common/plugin.py b/pyomo/common/plugin.py index aeea7a4fffd..3965bfecc74 100644 --- a/pyomo/common/plugin.py +++ b/pyomo/common/plugin.py @@ -8,334 +8,8 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# -# This module was originally developed as part of the PyUtilib project -# Copyright (c) 2008 Sandia Corporation. -# This software is distributed under the BSD License. -# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -# the U.S. Government retains certain rights in this software. 
-# ___________________________________________________________________________ - -import collections -import inspect -import sys -from weakref import ref as weakref_ref - -from pyomo.common.errors import PyomoException -from pyomo.common.deprecation import deprecated, deprecation_warning - -if sys.version_info[:2] >= (3,7): - _deterministic_dict = dict -else: - from pyomo.common.collections import OrderedDict - _deterministic_dict = OrderedDict - - -class PluginGlobals(object): - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def add_env(name): - pass - - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def pop_env(): - pass - - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def clear(): - pass - - -class PluginError(PyomoException): - pass - - -def alias(name, doc=None, subclass=None): - if subclass is not None: - deprecation_warning( - "The Pyomo plugin infrastructure alias() function does " - "not support the subclass flag.", version='6.0') - calling_frame = inspect.currentframe().f_back - locals_ = calling_frame.f_locals - # - # Some sanity checks - # - assert locals_ is not calling_frame.f_globals and '__module__' in locals_, \ - 'implements() can only be used in a class definition' - # - locals_.setdefault('__plugin_aliases__', []).append((name, doc)) - - -def implements(interface, inherit=None, namespace=None, service=False): - if namespace is not None: - deprecation_warning( - "The Pyomo plugin infrastructure only supports a " - "single global namespace.", version='6.0') - calling_frame = inspect.currentframe().f_back - locals_ = calling_frame.f_locals - # - # Some sanity checks - # - assert locals_ is not calling_frame.f_globals and 
'__module__' in locals_, \ - 'implements() can only be used in a class definition' - assert issubclass(interface, Interface) - # - locals_.setdefault('__implements__', []).append( - (interface, inherit, service) - ) - - -class InterfaceMeta(type): - def __new__(cls, name, bases, classdict, *args, **kwargs): - # Ensure that all interfaces have their own _plugins & _aliases - # dictionaries - classdict.setdefault('_next_id', 0) - classdict.setdefault('_plugins', {}) - classdict.setdefault('_aliases', {}) - return super().__new__(cls, name, bases, classdict, *args, **kwargs) - - -class Interface(metaclass=InterfaceMeta): - pass - - -class _deprecated_plugin_dict(dict): - def __init__(self, name, classdict): - super().__init__() - msg = classdict.pop('__deprecated_message__', None) - if not msg: - msg = 'The %s interface has been deprecated' % (name,) - version = classdict.pop('__deprecated_version__', None) - remove_in = classdict.pop('__deprecated_remove_in__', None) - self._deprecation_info = { - 'msg': msg, 'version': version, 'remove_in': remove_in - } - - def __setitem__(self, key, val): - deprecation_warning(**self._deprecation_info) - super().__setitem__(key, val) - - def items(self): - deprecation_warning(**self._deprecation_info) - return super().items() - - -class DeprecatedInterfaceMeta(InterfaceMeta): - def __new__(cls, name, bases, classdict, *args, **kwargs): - classdict.setdefault( - '_plugins', _deprecated_plugin_dict(name, classdict) - ) - return super().__new__(cls, name, bases, classdict, *args, **kwargs) - - -class DeprecatedInterface(Interface, metaclass=DeprecatedInterfaceMeta): - pass - - -class PluginMeta(type): - - def __new__(cls, name, bases, classdict, *args, **kwargs): - # This plugin is a singleton plugin based on the __singleton__ - # class attribute, OR if not specified, if any base class is a - # singleton plugin - _singleton = classdict.pop( - '__singleton__', - any(getattr(base, '__singleton__', None) is not None - for base in bases) 
- ) - # This prevents base class __singleton__, __plugin_aliases__, - # and __implements__ from implicitly bleeding through and being - # accidentally shared across subclasses. - classdict['__singleton__'] = None - aliases = classdict.setdefault('__plugin_aliases__', []) - implements = classdict.setdefault('__implements__', []) - # If multiple classes (classdict, and/or any base) implement() - # the same interface, use standard Python rules to determine - # which implements() should govern (i.e. classdict supersedes - # bases, bases resolved in order) - interfaces = set(impl[0] for impl in implements) - for base in bases: - implements.extend( - ep for ep in getattr(base, '__implements__', []) - if ep[0] not in interfaces - ) - interfaces.update(impl[0] for impl in implements) - for interface, inherit, service in implements: - if not inherit: - continue - if not any(issubclass(base, interface) for base in bases): - bases = bases + (interface,) - # Python requires that a class' metaclass be a - # (nonstrict) subclass of the metaclasses of all its - # base classes. Check, and declare a new metaclass if - # necessary. - if not issubclass(cls, type(interface)): - class tmp_meta(cls, type(interface)): - def __new__(cls, name, bases, classdict, - *args, **kwargs): - # This is a plugin and not an Interface. Do - # not set up dicts for the interface - # definition. 
- classdict.setdefault('_plugins', None) - classdict.setdefault('_aliases', None) - return super().__new__( - cls, name, bases, classdict, *args, **kwargs) - cls = tmp_meta - - new_class = super().__new__( - cls, name, bases, classdict, *args, **kwargs) - - # Register the new class with the interfaces - for interface, inherit, service in implements: - interface._plugins[new_class] = _deterministic_dict() - interface._aliases.update( - {name: (new_class, doc) for name, doc in aliases} - ) - - if _singleton: - new_class.__singleton__ = new_class() - - return new_class - - -class Plugin(object, metaclass=PluginMeta): - def __new__(cls): - if cls.__singleton__ is not None: - raise RuntimeError( - "Cannot create multiple singleton plugin instances of type %s" - % (cls,)) - obj = super().__new__(cls) - obj._plugin_ids = {} - # Record this instance (service) with all Interfaces - for interface, inherit, service in cls.__implements__: - _id = interface._next_id - interface._next_id += 1 - obj._plugin_ids[interface] = _id - interface._plugins[cls][_id] = (weakref_ref(obj), service) - return obj - - def activate(self): - cls = self.__class__ - for interface, inherit, service in cls.__implements__: - _id = self._plugin_ids[interface] - obj, service = interface._plugins[cls][_id] - if not service: - interface._plugins[cls][_id] = obj, True - enable = activate - - def deactivate(self): - cls = self.__class__ - for interface, inherit, service in cls.__implements__: - _id = self._plugin_ids[interface] - obj, service = interface._plugins[cls][_id] - if service: - interface._plugins[cls][_id] = obj, False - disable = deactivate - - def enabled(self): - cls = self.__class__ - return any(interface._plugins[cls][self._plugin_ids[interface]][1] - for interface, inherit, service in cls.__implements__) - - -class SingletonPlugin(Plugin): - __singleton__ = True - - -class ExtensionPoint(object): - def __init__(self, interface): - assert issubclass(interface, Interface) - self._interface = 
interface - - def __iter__(self, key=None, all=False): - for cls, plugins in self._interface._plugins.items(): - remove = [] - for i, (obj, service) in plugins.items(): - if not obj(): - remove.append(i) - elif ((all or service) and - (key is None or key is cls or key == cls.__name__)): - yield obj() - for i in remove: - del plugins[i] - - def __len__(self): - return len(list(self.__iter__())) - - def extensions(self, all=False, key=None): - return list(self.__iter__(key=key, all=all)) - - def __call__(self, key=None, all=False): - return self.extensions(all=all, key=key) - - def service(self, key=None, all=False): - """Return the unique service that matches the interface of this - extension point. An exception occurs if no service matches the - specified key, or if multiple services match. - """ - ans = self.extensions(all=all, key=key) - if len(ans) == 1: - # - # There is a single service, so return it. - # - return ans[0] - elif not ans: - return None - else: - raise PluginError("The ExtensionPoint does not have a unique " - "service! %d services are defined for interface" - " '%s' (key=%s)." 
% - (len(ans), self._interface.__name__, str(key))) - - -class PluginFactory(object): - - def __init__(self, interface): - self.interface = interface - - def __call__(self, name, *args, **kwds): - name = str(name) - if name not in self.interface._aliases: - return None - else: - return self.interface._aliases[name][0](*args, **kwds) - - def services(self): - return list(self.interface._aliases) - - def get_class(self, name): - return self.interface._aliases.get(name, [None])[0] - - def doc(self, name): - name = str(name) - if name not in self.interface._aliases: - return "" - else: - return self.interface._aliases[name][1] - def deactivate(self, name): - if isinstance(name, str): - cls = self.get_class(name) - if cls is None: - return - for service in ExtensionPoint(self.interface)(key=cls): - service.deactivate() +from pyomo.common.deprecation import (relocated_module) - def activate(self, name): - if isinstance(name, str): - cls = self.get_class(name) - if cls is None: - return - for service in ExtensionPoint(self.interface)(all=True, key=cls): - service.activate() +relocated_module('pyomo.common.plugin_base', version='TBD') -# Old name for creating plugin factories -CreatePluginFactory = PluginFactory diff --git a/pyomo/common/plugin_base.py b/pyomo/common/plugin_base.py new file mode 100644 index 00000000000..aeea7a4fffd --- /dev/null +++ b/pyomo/common/plugin_base.py @@ -0,0 +1,341 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ +# +# This module was originally developed as part of the PyUtilib project +# Copyright (c) 2008 Sandia Corporation. +# This software is distributed under the BSD License. +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# ___________________________________________________________________________ + +import collections +import inspect +import sys +from weakref import ref as weakref_ref + +from pyomo.common.errors import PyomoException +from pyomo.common.deprecation import deprecated, deprecation_warning + +if sys.version_info[:2] >= (3,7): + _deterministic_dict = dict +else: + from pyomo.common.collections import OrderedDict + _deterministic_dict = OrderedDict + + +class PluginGlobals(object): + @staticmethod + @deprecated("The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0') + def add_env(name): + pass + + @staticmethod + @deprecated("The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0') + def pop_env(): + pass + + @staticmethod + @deprecated("The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0') + def clear(): + pass + + +class PluginError(PyomoException): + pass + + +def alias(name, doc=None, subclass=None): + if subclass is not None: + deprecation_warning( + "The Pyomo plugin infrastructure alias() function does " + "not support the subclass flag.", version='6.0') + calling_frame = inspect.currentframe().f_back + locals_ = calling_frame.f_locals + # + # Some sanity checks + # + assert locals_ is not calling_frame.f_globals and '__module__' in locals_, \ + 'implements() can only be used in a class definition' + # + locals_.setdefault('__plugin_aliases__', []).append((name, doc)) + + 
+def implements(interface, inherit=None, namespace=None, service=False): + if namespace is not None: + deprecation_warning( + "The Pyomo plugin infrastructure only supports a " + "single global namespace.", version='6.0') + calling_frame = inspect.currentframe().f_back + locals_ = calling_frame.f_locals + # + # Some sanity checks + # + assert locals_ is not calling_frame.f_globals and '__module__' in locals_, \ + 'implements() can only be used in a class definition' + assert issubclass(interface, Interface) + # + locals_.setdefault('__implements__', []).append( + (interface, inherit, service) + ) + + +class InterfaceMeta(type): + def __new__(cls, name, bases, classdict, *args, **kwargs): + # Ensure that all interfaces have their own _plugins & _aliases + # dictionaries + classdict.setdefault('_next_id', 0) + classdict.setdefault('_plugins', {}) + classdict.setdefault('_aliases', {}) + return super().__new__(cls, name, bases, classdict, *args, **kwargs) + + +class Interface(metaclass=InterfaceMeta): + pass + + +class _deprecated_plugin_dict(dict): + def __init__(self, name, classdict): + super().__init__() + msg = classdict.pop('__deprecated_message__', None) + if not msg: + msg = 'The %s interface has been deprecated' % (name,) + version = classdict.pop('__deprecated_version__', None) + remove_in = classdict.pop('__deprecated_remove_in__', None) + self._deprecation_info = { + 'msg': msg, 'version': version, 'remove_in': remove_in + } + + def __setitem__(self, key, val): + deprecation_warning(**self._deprecation_info) + super().__setitem__(key, val) + + def items(self): + deprecation_warning(**self._deprecation_info) + return super().items() + + +class DeprecatedInterfaceMeta(InterfaceMeta): + def __new__(cls, name, bases, classdict, *args, **kwargs): + classdict.setdefault( + '_plugins', _deprecated_plugin_dict(name, classdict) + ) + return super().__new__(cls, name, bases, classdict, *args, **kwargs) + + +class DeprecatedInterface(Interface, 
metaclass=DeprecatedInterfaceMeta): + pass + + +class PluginMeta(type): + + def __new__(cls, name, bases, classdict, *args, **kwargs): + # This plugin is a singleton plugin based on the __singleton__ + # class attribute, OR if not specified, if any base class is a + # singleton plugin + _singleton = classdict.pop( + '__singleton__', + any(getattr(base, '__singleton__', None) is not None + for base in bases) + ) + # This prevents base class __singleton__, __plugin_aliases__, + # and __implements__ from implicitly bleeding through and being + # accidentally shared across subclasses. + classdict['__singleton__'] = None + aliases = classdict.setdefault('__plugin_aliases__', []) + implements = classdict.setdefault('__implements__', []) + # If multiple classes (classdict, and/or any base) implement() + # the same interface, use standard Python rules to determine + # which implements() should govern (i.e. classdict supersedes + # bases, bases resolved in order) + interfaces = set(impl[0] for impl in implements) + for base in bases: + implements.extend( + ep for ep in getattr(base, '__implements__', []) + if ep[0] not in interfaces + ) + interfaces.update(impl[0] for impl in implements) + for interface, inherit, service in implements: + if not inherit: + continue + if not any(issubclass(base, interface) for base in bases): + bases = bases + (interface,) + # Python requires that a class' metaclass be a + # (nonstrict) subclass of the metaclasses of all its + # base classes. Check, and declare a new metaclass if + # necessary. + if not issubclass(cls, type(interface)): + class tmp_meta(cls, type(interface)): + def __new__(cls, name, bases, classdict, + *args, **kwargs): + # This is a plugin and not an Interface. Do + # not set up dicts for the interface + # definition. 
+ classdict.setdefault('_plugins', None) + classdict.setdefault('_aliases', None) + return super().__new__( + cls, name, bases, classdict, *args, **kwargs) + cls = tmp_meta + + new_class = super().__new__( + cls, name, bases, classdict, *args, **kwargs) + + # Register the new class with the interfaces + for interface, inherit, service in implements: + interface._plugins[new_class] = _deterministic_dict() + interface._aliases.update( + {name: (new_class, doc) for name, doc in aliases} + ) + + if _singleton: + new_class.__singleton__ = new_class() + + return new_class + + +class Plugin(object, metaclass=PluginMeta): + def __new__(cls): + if cls.__singleton__ is not None: + raise RuntimeError( + "Cannot create multiple singleton plugin instances of type %s" + % (cls,)) + obj = super().__new__(cls) + obj._plugin_ids = {} + # Record this instance (service) with all Interfaces + for interface, inherit, service in cls.__implements__: + _id = interface._next_id + interface._next_id += 1 + obj._plugin_ids[interface] = _id + interface._plugins[cls][_id] = (weakref_ref(obj), service) + return obj + + def activate(self): + cls = self.__class__ + for interface, inherit, service in cls.__implements__: + _id = self._plugin_ids[interface] + obj, service = interface._plugins[cls][_id] + if not service: + interface._plugins[cls][_id] = obj, True + enable = activate + + def deactivate(self): + cls = self.__class__ + for interface, inherit, service in cls.__implements__: + _id = self._plugin_ids[interface] + obj, service = interface._plugins[cls][_id] + if service: + interface._plugins[cls][_id] = obj, False + disable = deactivate + + def enabled(self): + cls = self.__class__ + return any(interface._plugins[cls][self._plugin_ids[interface]][1] + for interface, inherit, service in cls.__implements__) + + +class SingletonPlugin(Plugin): + __singleton__ = True + + +class ExtensionPoint(object): + def __init__(self, interface): + assert issubclass(interface, Interface) + self._interface = 
interface + + def __iter__(self, key=None, all=False): + for cls, plugins in self._interface._plugins.items(): + remove = [] + for i, (obj, service) in plugins.items(): + if not obj(): + remove.append(i) + elif ((all or service) and + (key is None or key is cls or key == cls.__name__)): + yield obj() + for i in remove: + del plugins[i] + + def __len__(self): + return len(list(self.__iter__())) + + def extensions(self, all=False, key=None): + return list(self.__iter__(key=key, all=all)) + + def __call__(self, key=None, all=False): + return self.extensions(all=all, key=key) + + def service(self, key=None, all=False): + """Return the unique service that matches the interface of this + extension point. An exception occurs if no service matches the + specified key, or if multiple services match. + """ + ans = self.extensions(all=all, key=key) + if len(ans) == 1: + # + # There is a single service, so return it. + # + return ans[0] + elif not ans: + return None + else: + raise PluginError("The ExtensionPoint does not have a unique " + "service! %d services are defined for interface" + " '%s' (key=%s)." 
% + (len(ans), self._interface.__name__, str(key))) + + +class PluginFactory(object): + + def __init__(self, interface): + self.interface = interface + + def __call__(self, name, *args, **kwds): + name = str(name) + if name not in self.interface._aliases: + return None + else: + return self.interface._aliases[name][0](*args, **kwds) + + def services(self): + return list(self.interface._aliases) + + def get_class(self, name): + return self.interface._aliases.get(name, [None])[0] + + def doc(self, name): + name = str(name) + if name not in self.interface._aliases: + return "" + else: + return self.interface._aliases[name][1] + + def deactivate(self, name): + if isinstance(name, str): + cls = self.get_class(name) + if cls is None: + return + for service in ExtensionPoint(self.interface)(key=cls): + service.deactivate() + + def activate(self, name): + if isinstance(name, str): + cls = self.get_class(name) + if cls is None: + return + for service in ExtensionPoint(self.interface)(all=True, key=cls): + service.activate() + +# Old name for creating plugin factories +CreatePluginFactory = PluginFactory diff --git a/pyomo/common/plugins.py b/pyomo/common/plugins.py index 5958ebaa736..70ab4bec801 100644 --- a/pyomo/common/plugins.py +++ b/pyomo/common/plugins.py @@ -9,8 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from . import download -from . 
import getGSL - def load(): - download.DownloadFactory.register('gsl')(getGSL.get_gsl) + pass + diff --git a/pyomo/common/tests/test_plugin.py b/pyomo/common/tests/test_plugin.py index fafccc7a80c..531893ed588 100644 --- a/pyomo/common/tests/test_plugin.py +++ b/pyomo/common/tests/test_plugin.py @@ -14,7 +14,7 @@ from pyomo.common.unittest import TestCase from pyomo.common.log import LoggingIntercept -from pyomo.common.plugin import ( +from pyomo.common.plugin_base import ( Interface, Plugin, SingletonPlugin, ExtensionPoint, implements, alias, PluginFactory, PluginError, PluginGlobals, DeprecatedInterface ) diff --git a/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py b/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py index 2751c0a4746..6b86deaa535 100644 --- a/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py +++ b/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py @@ -1,7 +1,7 @@ import pyomo.environ as pe import pyomo.common.unittest as unittest from pyomo.contrib.appsi.cmodel import cmodel_available -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') diff --git a/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py b/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py index 85ab7ad6096..e1ea2439fbf 100644 --- a/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py +++ b/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py @@ -3,7 +3,7 @@ from pyomo.contrib.appsi.utils import collect_vars_and_named_exprs from pyomo.contrib.appsi.cmodel import cmodel, cmodel_available from typing import Callable -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL class TestCollectVarsAndNamedExpressions(unittest.TestCase): diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py index a2c500176f1..4f0c8dcb2d4 100644 --- 
a/pyomo/contrib/doe/doe.py +++ b/pyomo/contrib/doe/doe.py @@ -315,8 +315,9 @@ def compute_FIM(self, design_values, mode='sequential_finite', FIM_store_name=No """ This function solves a square Pyomo model with fixed design variables to compute the FIM. It calculates FIM with sensitivity information from four modes: - 1. sequential_finite: Calculates a one scenario model multiple times for multiple scenarios. - Sensitivity info estimated by finite difference + + 1. sequential_finite: Calculates a single-scenario model which is solved many times in series to estimate + sensitivity information by finite difference 2. sequential_sipopt: calculate sensitivity by sIPOPT [Experimental] 3. sequential_kaug: calculate sensitivity by k_aug [Experimental] 4. direct_kaug: calculate sensitivity by k_aug with direct sensitivity @@ -833,13 +834,14 @@ def _extract_jac(self, m): def run_grid_search(self, design_values, design_ranges, design_dimension_names, design_control_time, mode='sequential_finite', tee_option=False, scale_nominal_param_value=False, scale_constant_value=1, store_name= None, read_name=None, - filename=None, formula='central', step=0.001): + filename=None, formula='central', step=0.001): """ Enumerate through full grid search for any number of design variables; solve square problems sequentially to compute FIMs. It calculates FIM with sensitivity information from four modes: + 1. sequential_finite: Calculates a one scenario model multiple times for multiple scenarios. - Sensitivity info estimated by finite difference + Sensitivity info estimated by finite difference 2. sequential_sipopt: calculate sensitivity by sIPOPT [Experimental] 3. sequential_kaug: calculate sensitivity by k_aug [Experimental] 4. direct_kaug: calculate sensitivity by k_aug with direct sensitivity @@ -864,7 +866,7 @@ def run_grid_search(self, design_values, design_ranges, design_dimension_names, scale all elements in Jacobian matrix, default is 1. store_name: a string of file name. 
If not None, store results with this name. - Since there are maultiple experiments, results are numbered with a scalar number, + Since there are multiple experiments, results are numbered with a scalar number, and the result for one grid is 'store_name(count).csv' (count is the number of count). read_name: a string of file name. If not None, read result files. diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py index b4a57195598..d5eedaf41f0 100644 --- a/pyomo/contrib/doe/measurements.py +++ b/pyomo/contrib/doe/measurements.py @@ -36,20 +36,27 @@ class Measurements: def __init__(self, measurement_index_time, variance=None, ind_string='_index_'): """ - This class stores information on which algebraic and differential variables in the Pyomo model are considered measurements. - This includes the functionality to specify indices for these measurement variables. - For example, with a partial differential algebraic equation model, - these measurement index sets can specify which spatial and temporal coordinates each measurement is available. - Moreover, this class supports defining the covariance matrix for all measurements. + This class stores information on which algebraic and differential + variables in the Pyomo model are considered measurements. + + This includes the functionality to specify indices for these + measurement variables. For example, with a partial differential + algebraic equation model, these measurement index sets can + specify which spatial and temporal coordinates each measurement + is available. Moreover, this class supports defining the + covariance matrix for all measurements. Parameters ---------- measurement_index_time: a ``dict``, keys are measurement variable names, - if there are extra index, for e.g., Var[scenario, extra_index, time]: - values are a dictionary, keys are its extra index, values are its measuring time points. 
- if there are no extra index, for e.g., Var[scenario, time]: - values are a list of measuring time point. + + * if there are extra indices, for e.g., Var[scenario, extra_index, time]: + values are a dictionary, keys are its extra index, values are its + measuring time points. + * if there are no extra indices, for e.g., Var[scenario, time]: + values are a list of measuring time point. + For e.g., for the kinetics illustrative example, it should be {'C':{'CA':[0,1,..], 'CB':[0,2,...]}, 'k':[0,4,..]}, so the measurements are C[scenario, 'CA', 0]..., k[scenario, 0].... variance: @@ -60,6 +67,7 @@ def __init__(self, measurement_index_time, variance=None, ind_string='_index_'): ind_string: a ''string'', used to flatten the name of variables and extra index. Default is '_index_'. For e.g., for {'C':{'CA': 10, 'CB': 1, 'CC': 2}}, the reformulated name is 'C_index_CA'. + """ self.measurement_all_info = measurement_index_time self.ind_string = ind_string @@ -85,12 +93,14 @@ def _name_and_index_generator(self, all_info): """ Generate a dictionary, keys are the variable names, values are the indexes of this variable. For e.g., name_and_index = {'C': ['CA', 'CB', 'CC']} + Parameters - --------- + ---------- all_info: a dictionary, keys are measurement variable names, - values are a dictionary, keys are its extra index, values are its measuring time points - values are a list of measuring time point if there is no extra index for this measurement + values are a dictionary, keys are its extra index, values are its measuring time points + values are a list of measuring time point if there is no extra index for this measurement Note: all_info can be the self.measurement_all_info, but does not have to be it. 
+ """ measurement_name = list(all_info.keys()) # a list of measurement extra indexes @@ -108,13 +118,15 @@ def _name_and_index_generator(self, all_info): def _generate_flatten_name(self, measure_name_and_index): """ Generate measurement flattened names + Parameters - ---------- + ----------- measure_name_and_index: a dictionary, keys are measurement names, values are lists of extra indexes Returns - ------ + ------- jac_involved_name: a list of flattened measurement names + """ flatten_names = [] for j in measure_name_and_index.keys(): @@ -130,6 +142,7 @@ def _generate_flatten_name(self, measure_name_and_index): def _generate_variance(self, flatten_measure_name, variance, name_and_index): """ Generate the variance dictionary + Parameters ---------- flatten_measure_name: flattened measurement names. For e.g., flattenning {'C':{'CA': 10, 'CB': 1, 'CC': 2}} will be 'C_index_CA', ..., 'C_index_CC'. @@ -141,6 +154,7 @@ def _generate_variance(self, flatten_measure_name, variance, name_and_index): If there is no extra index, it is a dict, keys are measurement variable names, values are its variance as a scalar number. name_and_index: a dictionary, keys are measurement names, values are a list of extra indexes. + """ flatten_variance = {} for i in flatten_measure_name: @@ -201,6 +215,7 @@ def _model_measure_name(self): def SP_measure_name(self, j, t,scenario_all=None, p=None, mode='sequential_finite', legal_t=True): """Return pyomo string name for different modes + Arguments --------- j: flatten measurement name @@ -210,11 +225,12 @@ def SP_measure_name(self, j, t,scenario_all=None, p=None, mode='sequential_finit mode: mode name, can be 'simultaneous_finite' or 'sequential_finite' legal_t: if the time point is legal for this measurement. 
default is True - Return - ------ + Returns + ------- up_C, lo_C: two measurement pyomo string names for simultaneous mode legal_t: if the time point is legal for this measurement string_name: one measurement pyomo string name for sequential + """ if mode=='simultaneous_finite': # check extra index @@ -252,6 +268,8 @@ def check_subset(self,subset, throw_error=True, valid_subset=True): """ Check if the subset is correctly defined with right name, index and time. + Parameters + ---------- subset: a ''dict'' where measurement name and index are involved in jacobian calculation throw_error: @@ -273,4 +291,4 @@ def check_subset(self,subset, throw_error=True, valid_subset=True): valid_subset = False if throw_error: raise ValueError('The time of {} is not included as measurements before.'.format(t)) - return valid_subset \ No newline at end of file + return valid_subset diff --git a/pyomo/contrib/doe/result.py b/pyomo/contrib/doe/result.py index 01f977e1e98..0e1929059b9 100644 --- a/pyomo/contrib/doe/result.py +++ b/pyomo/contrib/doe/result.py @@ -43,7 +43,7 @@ def __init__(self, para_name, measure_object, jacobian_info=None, all_jacobian_i """Analyze the FIM result for a single run Parameters - ----------- + ---------- para_name: A ``list`` of parameter names measure_object: @@ -328,8 +328,8 @@ def __init__(self, design_ranges, design_dimension_names, design_control_time, F This class deals with the FIM results from grid search, providing A, D, E, ME-criteria results for each design variable. Can choose to draw 1D sensitivity curves and 2D heatmaps. - Parameters: - ----------- + Parameters + ---------- design_ranges: a ``dict`` whose keys are design variable names, values are a list of design variable values to go over design_dimension_names: @@ -356,7 +356,7 @@ def extract_criteria(self): """ Extract design criteria values for every 'grid' (design variable combination) searched. 
- Returns: + Returns ------- self.store_all_results_dataframe: a pandas dataframe with columns as design variable names and A, D, E, ME-criteria names. Each row contains the design variable value for this 'grid', and the 4 design criteria value for this 'grid'. diff --git a/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py b/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py new file mode 100644 index 00000000000..443d4f3a35c --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py @@ -0,0 +1,649 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +from collections import namedtuple + +from pyomo.common.collections import ComponentSet, ComponentMap +from pyomo.common.timing import HierarchicalTimer +from pyomo.common.dependencies import ( + attempt_import, + numpy as np, +) +from pyomo.core.base.constraint import Constraint +from pyomo.core.base.var import Var +from pyomo.core.base.objective import Objective +from pyomo.core.base.suffix import Suffix +from pyomo.core.expr.visitor import identify_variables +from pyomo.util.calc_var_value import calculate_variable_from_constraint +from pyomo.util.subsystems import ( + TemporarySubsystemManager, + create_subsystem_block, + generate_subsystem_blocks, +) + +# Use attempt_import here due to unguarded NumPy import in these files +pyomo_nlp = attempt_import('pyomo.contrib.pynumero.interfaces.pyomo_nlp')[0] +nlp_proj = attempt_import('pyomo.contrib.pynumero.interfaces.nlp_projections')[0] +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( + cyipopt_available, + CyIpoptNLP, + CyIpoptSolver, +) +from pyomo.contrib.pynumero.algorithms.solvers.scipy_solvers import ( + FsolveNlpSolver, + RootNlpSolver, + NewtonNlpSolver, + SecantNewtonNlpSolver, +) +from pyomo.contrib.incidence_analysis.interface import ( + get_structural_incidence_matrix, +) +from pyomo.contrib.incidence_analysis.matching import maximum_matching +from pyomo.contrib.incidence_analysis import IncidenceGraphInterface +from pyomo.contrib.incidence_analysis.util import ( + generate_strongly_connected_components, +) + + +class NlpSolverBase(object): + """A base class that solves an NLP object + + Subclasses should implement this interface for compatibility with + ImplicitFunctionSolver objects. 
+ + """ + + def __init__(self, nlp, options=None, timer=None): + raise NotImplementedError( + "%s has not implemented the __init__ method" % type(self) + ) + + def solve(self, **kwds): + raise NotImplementedError( + "%s has not implemented the solve method" % type(self) + ) + + +class CyIpoptSolverWrapper(NlpSolverBase): + """A wrapper for CyIpoptNLP and CyIpoptSolver that implements the + NlpSolverBase API + + """ + def __init__(self, nlp, options=None, timer=None): + self._cyipopt_nlp = CyIpoptNLP(nlp) + self._cyipopt_solver = CyIpoptSolver(self._cyipopt_nlp, options=options) + + def solve(self, **kwds): + return self._cyipopt_solver.solve(**kwds) + + +class ScipySolverWrapper(NlpSolverBase): + """A wrapper for SciPy NLP solvers that implements the NlpSolverBase API + + This solver uses scipy.optimize.fsolve for "vector-valued" NLPs (with more + than one primal variable and equality constraint) and the Secant-Newton + hybrid for "scalar-valued" NLPs. + + """ + def __init__(self, nlp, timer=None, options=None): + if options is None: + options = {} + for key in options: + if ( + key not in SecantNewtonNlpSolver.OPTIONS + and key not in FsolveNlpSolver.OPTIONS + ): + raise ValueError( + "Option %s is invalid for both SecantNewtonNlpSolver and" + " FsolveNlpSolver" % key + ) + # Note that options currently contain the options for both solvers. + # There is currently no way to specify, e.g., different tolerances + # for the two solvers. This can be updated if there is demand for it. 
+ newton_options = { + key: value for key, value in options.items() + if key in SecantNewtonNlpSolver.OPTIONS + } + fsolve_options = { + key: value for key, value in options.items() + if key in FsolveNlpSolver.OPTIONS + } + if nlp.n_primals() == 1: + solver = SecantNewtonNlpSolver( + nlp, timer=timer, options=newton_options + ) + else: + solver = FsolveNlpSolver( + nlp, timer=timer, options=fsolve_options + ) + self._nlp = nlp + self._options = options + self._solver = solver + + def solve(self, x0=None): + res = self._solver.solve(x0=x0) + return res + + +class PyomoImplicitFunctionBase(object): + """A base class defining an API for implicit functions defined using + Pyomo components. In particular, this is the API required by + ExternalPyomoModel. + + Implicit functions are defined by two lists of Pyomo VarData and + one list of Pyomo ConstraintData. The first list of VarData corresponds + to "variables" defining the outputs of the implicit function. + The list of ConstraintData are equality constraints that are converged + to evaluate the implicit function. The second list of VarData are + variables to be treated as "parameters" or inputs to the implicit + function. 
+ + """ + + def __init__(self, variables, constraints, parameters): + """ + Arguments + --------- + variables: List of VarData + Variables to be treated as outputs of the implicit function + constraints: List of ConstraintData + Constraints that are converged to evaluate the implicit function + parameters: List of VarData + Variables to be treated as inputs to the implicit function + + """ + self._variables = variables + self._constraints = constraints + self._parameters = parameters + self._block_variables = variables + parameters + self._block = create_subsystem_block( + constraints, + self._block_variables, + ) + + def get_variables(self): + return self._variables + + def get_constraints(self): + return self._constraints + + def get_parameters(self): + return self._parameters + + def get_block(self): + return self._block + + def set_parameters(self, values): + """Sets the parameters of the system that defines the implicit + function. + + This method does not necessarily need to update values of the Pyomo + variables, as long as the next evaluation of this implicit function + is consistent with these inputs. + + Arguments + --------- + values: NumPy array + Array of values to set for the "parameter variables" in the order + they were specified in the constructor + + """ + raise NotImplementedError() + + def evaluate_outputs(self): + """Returns the values of the variables that are treated as outputs + of the implicit function + + The returned values do not necessarily need to be the values stored + in the Pyomo variables, as long as they are consistent with the + latest parameters that have been set. 
+ + Returns + ------- + NumPy array + Array with values corresponding to the "output variables" in + the order they were specified in the constructor + + """ + raise NotImplementedError() + + def update_pyomo_model(self): + """Sets values of "parameter variables" and "output variables" + to the most recent values set or computed in this implicit function + + """ + raise NotImplementedError() + + +class ImplicitFunctionSolver(PyomoImplicitFunctionBase): + """A basic implicit function solver that uses a ProjectedNLP to solve + the parameterized system without repeated file writes when parameters + are updated + + """ + + def __init__( + self, + variables, + constraints, + parameters, + solver_class=None, + solver_options=None, + timer=None, + ): + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + if solver_options is None: + solver_options = {} + + self._timer.start("__init__") + + super().__init__(variables, constraints, parameters) + block = self.get_block() + + # PyomoNLP requires an objective + block._obj = Objective(expr=0.0) + # CyIpoptSolver requires a non-empty scaling factor + block.scaling_factor = Suffix(direction=Suffix.EXPORT) + block.scaling_factor[block._obj] = 1.0 + + self._timer.start("PyomoNLP") + self._nlp = pyomo_nlp.PyomoNLP(block) + self._timer.stop("PyomoNLP") + primals_ordering = [var.name for var in variables] + self._proj_nlp = nlp_proj.ProjectedExtendedNLP( + self._nlp, primals_ordering + ) + + self._timer.start("NlpSolver") + if solver_class is None: + self._solver = ScipySolverWrapper( + self._proj_nlp, options=solver_options, timer=timer + ) + else: + self._solver = solver_class( + self._proj_nlp, options=solver_options, timer=timer + ) + self._timer.stop("NlpSolver") + + vars_in_cons = [] + _seen = set() + for con in constraints: + for var in identify_variables(con.expr, include_fixed=False): + if id(var) not in _seen: + _seen.add(id(var)) + vars_in_cons.append(var) + self._active_var_set = 
ComponentSet(vars_in_cons)
+
+        # It is possible (and fairly common) for variables specified as
+        # parameters to not appear in any of the specified constraints.
+        # We will fail if we try to get their coordinates in the NLP.
+        #
+        # Technically, this could happen for the variables as well. However,
+        # this would guarantee that the Jacobian is singular. I will worry
+        # about this when I encounter such a case.
+        self._active_param_mask = np.array(
+            [(p in self._active_var_set) for p in parameters]
+        )
+        self._active_parameters = [
+            p for i, p in enumerate(parameters) if self._active_param_mask[i]
+        ]
+        if any((var not in self._active_var_set) for var in variables):
+            raise RuntimeError(
+                "Invalid model. All variables must appear in specified"
+                " constraints."
+            )
+
+        # These are coordinates in the original NLP
+        self._variable_coords = self._nlp.get_primal_indices(variables)
+        self._active_parameter_coords = self._nlp.get_primal_indices(
+            self._active_parameters
+        )
+
+        # NOTE: With this array, we are storing the same data in two locations.
+        # Once here, and once in the NLP. We do this because parameters do not
+        # *need* to exist in the NLP. However, we still need to be able to
+        # update the "parameter variables" in the Pyomo model. If we only stored
+        # the parameters in the NLP, we would lose the values for parameters
+        # that don't appear in the active constraints.
+        self._parameter_values = np.array([var.value for var in parameters])
+
+        self._timer.stop("__init__")
+
+    def set_parameters(self, values, **kwds):
+        self._timer.start("set_parameters")
+        # I am not 100% sure the values will always be an array (as opposed to
+        # list), so explicitly convert here.
+        values = np.array(values)
+        self._parameter_values = values
+        values = np.compress(self._active_param_mask, values)
+        primals = self._nlp.get_primals()
+        # Will it cause a problem that _active_parameter_coords is a list
+        # rather than array?
+ primals[self._active_parameter_coords] = values + self._nlp.set_primals(primals) + self._timer.start("solve") + results = self._solver.solve(**kwds) + self._timer.stop("solve") + self._timer.stop("set_parameters") + return results + + def evaluate_outputs(self): + primals = self._nlp.get_primals() + outputs = primals[self._variable_coords] + return outputs + + def update_pyomo_model(self): + primals = self._nlp.get_primals() + for i, var in enumerate(self.get_variables()): + var.set_value( + primals[self._variable_coords[i]], + skip_validation=True, + ) + for var, value in zip(self._parameters, self._parameter_values): + var.set_value(value, skip_validation=True) + + +class DecomposedImplicitFunctionBase(PyomoImplicitFunctionBase): + """A base class for an implicit function that applies a partition + to its variables and constraints and converges the system by solving + subsets sequentially + + Subclasses should implement the partition_system method, which + determines how variables and constraints are partitioned into subsets. 
+ + """ + + def __init__( + self, + variables, + constraints, + parameters, + solver_class=None, + solver_options=None, + timer=None, + use_calc_var=True, + ): + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + self._timer.start("__init__") + if solver_class is None: + solver_class = ScipySolverWrapper + self._solver_class = solver_class + if solver_options is None: + solver_options = {} + self._solver_options = solver_options + self._calc_var_cutoff = 1 if use_calc_var else 0 + # NOTE: This super call is only necessary so the get_* methods work + super().__init__(variables, constraints, parameters) + + subsystem_list = [ + # Switch order in list for compatibility with generate_subsystem_blocks + (cons, vars) for vars, cons + in self.partition_system(variables, constraints) + ] + + var_param_set = ComponentSet(variables + parameters) + # We will treat variables that are neither variables nor parameters + # as "constants". These are usually things like area, volume, or some + # other global parameter that is treated as a variable and "fixed" with + # an equality constraint. + constants = [] + constant_set = ComponentSet() + for con in constraints: + for var in identify_variables(con.expr, include_fixed=False): + if var not in constant_set and var not in var_param_set: + # If this is a newly encountered variable that is neither + # a var nor param, treat it as a "constant" + constant_set.add(var) + constants.append(var) + + with TemporarySubsystemManager(to_fix=constants): + # Temporarily fix "constant" variables so (a) they don't show + # up in the local inputs of the subsystem blocks and (b) so + # they don't appear as additional columns in the NLPs and + # ProjectedNLPs. + + self._subsystem_list = list(generate_subsystem_blocks(subsystem_list)) + # These are subsystems that need an external solver, rather than + # calculate_variable_from_constraint. _calc_var_cutoff should be either + # 0 or 1. 
+ self._solver_subsystem_list = [ + (block, inputs) for block, inputs in self._subsystem_list + if len(block.vars) > self._calc_var_cutoff + ] + + # Need a dummy objective to create an NLP + for block, inputs in self._solver_subsystem_list: + block._obj = Objective(expr=0.0) + # I need scaling_factor so Pyomo NLPs I create from these blocks + # don't break when ProjectedNLP calls get_primals_scaling + block.scaling_factor = Suffix(direction=Suffix.EXPORT) + # HACK: scaling_factor just needs to be nonempty + block.scaling_factor[block._obj] = 1.0 + + # Original PyomoNLP for each subset in the partition + # Since we are creating these NLPs with "constants" fixed, these + # variables will not show up in the NLPs + self._timer.start("PyomoNLP") + self._solver_subsystem_nlps = [ + pyomo_nlp.PyomoNLP(block) + for block, inputs in self._solver_subsystem_list + ] + self._timer.stop("PyomoNLP") + + # "Output variable" names are required to construct ProjectedNLPs. + # Ideally, we can eventually replace these with variable indices. 
+ self._solver_subsystem_var_names = [ + [var.name for var in block.vars.values()] + for block, inputs in self._solver_subsystem_list + ] + self._solver_proj_nlps = [ + nlp_proj.ProjectedExtendedNLP(nlp, names) for nlp, names in + zip(self._solver_subsystem_nlps, self._solver_subsystem_var_names) + ] + + # We will solve the ProjectedNLPs rather than the original NLPs + self._timer.start("NlpSolver") + self._nlp_solvers = [ + self._solver_class( + nlp, timer=self._timer, options=self._solver_options + ) for nlp in self._solver_proj_nlps + ] + self._timer.stop("NlpSolver") + self._solver_subsystem_input_coords = [ + # Coordinates in the NLP, not ProjectedNLP + nlp.get_primal_indices(inputs) + for nlp, (subsystem, inputs) in + zip(self._solver_subsystem_nlps, self._solver_subsystem_list) + ] + + self._n_variables = len(variables) + self._n_constraints = len(constraints) + self._n_parameters = len(parameters) + + # This is a global (wrt individual subsystems) array that stores + # the current values of variables and parameters. This is useful + # for updating values in between subsystem solves. + # + # NOTE: This could also be implemented as a tuple of + # (subsystem_coord, primal_coord), which would eliminate the need to + # store data in two locations. The current implementation is easier, + # however. + self._global_values = np.array( + [var.value for var in variables + parameters] + ) + self._global_indices = ComponentMap( + (var, i) for i, var in enumerate(variables + parameters) + ) + # Cache the global array-coordinates of each subset of "input" + # variables. These are used for updating before each solve. + self._local_input_global_coords = [ + # If I do not fix "constants" above, I get errors here + # that only show up in the CLC models. + # TODO: Add a test that covers this edge case. 
+ np.array( + [self._global_indices[var] for var in inputs], + dtype=int, + ) for (_, inputs) in self._solver_subsystem_list + ] + + # Cache the global array-coordinates of each subset of "output" + # variables. These are used for updating after each solve. + self._output_coords = [ + np.array( + [self._global_indices[var] for var in block.vars.values()], + dtype=int, + ) for (block, _) in self._solver_subsystem_list + ] + + self._timer.stop("__init__") + + def n_subsystems(self): + """Returns the number of subsystems in the partition of variables + and equations used to converge the system defining the implicit + function + + """ + return len(self._subsystem_list) + + def partition_system(self, variables, constraints): + """Partitions the systems of equations defined by the provided + variables and constraints + + Each subset of the partition should have an equal number of variables + and equations. These subsets, or "subsystems", will be solved + sequentially in the order provided by this method instead of solving + the entire system simultaneously. Subclasses should implement this + method to define the partition that their implicit function solver + will use. Partitions are defined as a list of tuples of lists. + Each tuple has two entries, the first a list of variables, and the + second a list of constraints. These inner lists should have the + same number of entries. + + Arguments + --------- + variables: list + List of VarData in the system to be partitioned + constraints: list + List of ConstraintData (equality constraints) defining the + equations of the system to be partitioned + + Returns + ------- + List of tuples + List of tuples describing the ordered partition. Each tuple + contains equal-length subsets of variables and constraints. + + """ + # Subclasses should implement this method, which returns an ordered + # partition (two lists-of-lists) of variables and constraints. 
+ raise NotImplementedError( + "%s has not implemented the partition_system method" % type(self) + ) + + def set_parameters(self, values): + self._timer.start("set_parameters") + values = np.array(values) + # + # Set parameter values + # + # NOTE: Here I rely on the fact that the "global array" is in the + # order (variables, parameters) + self._global_values[self._n_variables:] = values + + # + # Solve subsystems one-by-one + # + # The basic procedure is: update local information from the global + # array, solve the subsystem, then update the global array with + # new values. + solver_subsystem_idx = 0 + for block, inputs in self._subsystem_list: + if len(block.vars) <= self._calc_var_cutoff: + # Update model values from global array. + for var in inputs: + idx = self._global_indices[var] + var.set_value( + self._global_values[idx], + skip_validation=True, + ) + # Solve using calculate_variable_from_constraint + var = block.vars[0] + con = block.cons[0] + self._timer.start("solve") + self._timer.start("calc_var") + calculate_variable_from_constraint(var, con) + self._timer.stop("calc_var") + self._timer.stop("solve") + # Update global array with values from solve + self._global_values[self._global_indices[var]] = var.value + else: + # Transfer variable values into the projected NLP, solve, + # and extract values. + + i = solver_subsystem_idx + nlp = self._solver_subsystem_nlps[i] + proj_nlp = self._solver_proj_nlps[i] + input_coords = self._solver_subsystem_input_coords[i] + input_global_coords = self._local_input_global_coords[i] + output_global_coords = self._output_coords[i] + + nlp_solver = self._nlp_solvers[solver_subsystem_idx] + + # Get primals, load potentially new input values into primals, + # then load primals into NLP + primals = nlp.get_primals() + primals[input_coords] = self._global_values[input_global_coords] + + # Set primals in the original NLP. This is necessary so the + # parameters get updated. 
+ nlp.set_primals(primals) + + # Get initial guess in the space of variables we solve for + x0 = proj_nlp.get_primals() + self._timer.start("solve") + self._timer.start("solve_nlp") + nlp_solver.solve(x0=x0) + self._timer.stop("solve_nlp") + self._timer.stop("solve") + + # Set values in global array. Here we rely on the fact that + # the projected NLP's primals are in the order that variables + # were initially specified. + self._global_values[output_global_coords] = proj_nlp.get_primals() + + solver_subsystem_idx += 1 + + self._timer.stop("set_parameters") + + def evaluate_outputs(self): + return self._global_values[:self._n_variables] + + def update_pyomo_model(self): + # NOTE: Here we rely on the fact that global_values is in the + # order (variables, parameters) + for i, var in enumerate(self.get_variables() + self.get_parameters()): + var.set_value(self._global_values[i], skip_validation=True) + + +class SccImplicitFunctionSolver(DecomposedImplicitFunctionBase): + + def partition_system(self, variables, constraints): + self._timer.start("partition") + igraph = IncidenceGraphInterface() + var_blocks, con_blocks = igraph.get_diagonal_blocks( + variables, constraints + ) + self._timer.stop("partition") + return zip(var_blocks, con_blocks) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py b/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py index 6b46b030b15..9b98154871f 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py @@ -162,7 +162,6 @@ class NewtonNlpSolver(ScalarDenseSquareNlpSolver): )) def solve(self, x0=None): - self._timer.start("solve") if x0 is None: x0 = self._nlp.get_primals() @@ -178,7 +177,6 @@ def solve(self, x0=None): full_output=self.options.full_output, maxiter=self.options.maxiter, ) - self._timer.stop("solve") return results @@ -217,7 +215,6 @@ def __init__(self, nlp, timer=None, options=None): self.converged_with_secant = None 
def solve(self, x0=None): - self._timer.start("solve") if x0 is None: x0 = self._nlp.get_primals() @@ -242,7 +239,6 @@ def solve(self, x0=None): maxiter=self.options.newton_iter, full_output=self.options.full_output, ) - self._timer.stop("solve") return results diff --git a/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py b/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py index e8bfbace689..7ce96966e0d 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py @@ -9,8 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from collections import namedtuple from pyomo.common.timing import HierarchicalTimer from pyomo.common.config import ConfigBlock +from pyomo.util.subsystems import create_subsystem_block class SquareNlpSolverBase(object): diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py new file mode 100644 index 00000000000..59325c7a7a2 --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py @@ -0,0 +1,384 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. 
+# ___________________________________________________________________________ + +import itertools +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +from pyomo.common.dependencies import ( + scipy, + scipy_available, + numpy as np, + numpy_available, +) +from pyomo.contrib.pynumero.asl import AmplInterface +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + CyIpoptSolverWrapper, + ImplicitFunctionSolver, + DecomposedImplicitFunctionBase, + SccImplicitFunctionSolver, +) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( + cyipopt_available, +) + +if not scipy_available or not numpy_available: + # SciPy is only really necessary as it is a dependency of AmplInterface. + # NumPy is directly used by the implicit function solvers. + raise unittest.SkipTest( + "NumPy and SciPy are needed to test the implicit function solvers" + ) +if not AmplInterface.available(): + # AmplInterface is not theoretically necessary for these solvers, + # however it is the only AD backend implemented for PyomoNLP. + raise unittest.SkipTest( + "PyNumero ASL extension is necessary to test implicit" + " function solvers" + ) + + +class ImplicitFunction1(object): + + def __init__(self): + self._model = self._make_model() + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + # Note that this system of constraints decomposes. 
First con1 + # and con3 are used to solve for x[2] and x[3], then con2 + # is used to solve for x[1] + m.con1 = pyo.Constraint( + expr=m.x[2]**2 + m.x[3]**2 == m.p[1] + ) + m.con2 = pyo.Constraint( + expr=2*m.x[1] + 3*m.x[2] - 4*m.x[3] == m.p[1]**2 - m.p[2] + ) + m.con3 = pyo.Constraint( + expr=m.p[2]**1.5 == 2*pyo.exp(m.x[2]/m.x[3]) + ) + m.obj = pyo.Objective(expr=0.0) + return m + + def get_parameters(self): + m = self._model + return [m.p[1], m.p[2]] + + def get_variables(self): + m = self._model + return [m.x[1], m.x[2], m.x[3]] + + def get_equations(self): + m = self._model + return [m.con1, m.con2, m.con3] + + def get_input_output_sequence(self): + p1_inputs = [1.0, 2.0, 3.0] + p2_inputs = [1.0, 2.0, 3.0] + inputs = list(itertools.product(p1_inputs, p2_inputs)) + + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869), + (0.898530, 0.327465, 0.944863), + (-0.589294, 0.690561, 0.723274), + (5.033063, -0.805644, 1.162299), + (2.977820, 0.463105, 1.336239), + (1.080826, 0.976601, 1.022864), + (8.327101, -0.986708, 1.423519), + (5.922325, 0.567186, 1.636551), + (3.711364, 1.196087, 1.252747), + ] + # We will iterate over these input/output pairs, set inputs, + # solve, and check outputs. Note that these values are computed + # with Ipopt, and the default solver for the system defining the + # implicit function is scipy.optimize.fsolve. There is no guarantee + # that these algorithms converge to the same solution for the + # highly nonlinear system defining this implicit function. If some + # of these tests start failing (e.g. because one of these algorithms + # changes), some of these inputs may need to be omitted. + return list(zip(inputs, outputs)) + + +class ImplicitFunctionWithExtraVariables(ImplicitFunction1): + """This is the same system as ImplicitFunction1, but now some + of the hand-coded constants have been replaced by unfixed variables. 
+ These variables will be completely ignored and treated as constants + by the implicit functions. + + """ + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.K = pyo.Set(initialize=[1, 2, 3]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + + # These variables will be treated as neither outputs nor + # inputs. They are simply treated as constants. + m.const = pyo.Var(m.K, initialize=1.0) + m.const[1].set_value(1.0) + m.const[2].set_value(2.0) + m.const[3].set_value(1.5) + + m.con1 = pyo.Constraint( + expr=m.const[1]*m.x[2]**2 + m.x[3]**2 == m.p[1] + ) + m.con2 = pyo.Constraint( + expr=m.const[2]*m.x[1] + 3*m.x[2] - 4*m.x[3] == m.p[1]**2 - m.p[2] + ) + m.con3 = pyo.Constraint( + expr=m.p[2]**m.const[3] == 2*pyo.exp(m.x[2]/m.x[3]) + ) + m.obj = pyo.Objective(expr=0.0) + return m + + +class ImplicitFunctionInputsDontAppear(object): + """This is an implicit function designed to test the edge case + where inputs do not appear in the system defining the implicit + function (i.e. the function is constant). 
+ + """ + def __init__(self): + self._model = self._make_model() + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + m.con1 = pyo.Constraint( + expr=m.x[2]**2 + m.x[3]**2 == 1.0 + ) + m.con2 = pyo.Constraint( + expr=2*m.x[1] + 3*m.x[2] - 4*m.x[3] == 0.0 + ) + m.con3 = pyo.Constraint( + expr=1.0 == 2*pyo.exp(m.x[2]/m.x[3]) + ) + m.obj = pyo.Objective(expr=0.0) + return m + + def get_parameters(self): + m = self._model + return [m.p[1], m.p[2]] + + def get_variables(self): + m = self._model + return [m.x[1], m.x[2], m.x[3]] + + def get_equations(self): + m = self._model + return [m.con1, m.con2, m.con3] + + def get_input_output_sequence(self): + # As the implicit function is constant, these parameter + # values don't matter + p1_inputs = [-1.0, 0.0] + p2_inputs = [1.0] + inputs = list(itertools.product(p1_inputs, p2_inputs)) + + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869), + (2.498253, -0.569676, 0.821869), + ] + return list(zip(inputs, outputs)) + + +class ImplicitFunctionNoInputs(ImplicitFunctionInputsDontAppear): + """The same system as with inputs that don't appear, but now the + inputs are not provided to the implicit function solver + + """ + def get_parameters(self): + return [] + + def get_input_output_sequence(self): + inputs = [()] + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869), + ] + return list(zip(inputs, outputs)) + + +class _TestSolver(unittest.TestCase): + """A suite of basic tests for implicit function solvers. + + A "concrete" subclass should be defined for each implicit function + solver. 
This subclass should implement get_solver_class, then + add "test" methods that call the following methods: + + _test_implicit_function_1 + _test_implicit_function_inputs_dont_appear + _test_implicit_function_no_inputs + _test_implicit_function_with_extra_variables + + These methods are private so they don't get picked up on the base + class by pytest. + + """ + + def get_solver_class(self): + raise NotImplementedError() + + def _test_implicit_function(self, ImplicitFunctionClass, **kwds): + SolverClass = self.get_solver_class() + fcn = ImplicitFunctionClass() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + + solver = SolverClass(variables, equations, parameters, **kwds) + + for inputs, pred_outputs in fcn.get_input_output_sequence(): + solver.set_parameters(inputs) + outputs = solver.evaluate_outputs() + self.assertStructuredAlmostEqual( + list(outputs), list(pred_outputs), reltol=1e-5, abstol=1e-5 + ) + + solver.update_pyomo_model() + for i, var in enumerate(variables): + self.assertAlmostEqual( + var.value, pred_outputs[i], delta=1e-5 + ) + + def _test_implicit_function_1(self, **kwds): + self._test_implicit_function(ImplicitFunction1, **kwds) + + def _test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function(ImplicitFunctionInputsDontAppear) + + def _test_implicit_function_no_inputs(self): + self._test_implicit_function(ImplicitFunctionNoInputs) + + def _test_implicit_function_with_extra_variables(self): + self._test_implicit_function(ImplicitFunctionWithExtraVariables) + + +class TestImplicitFunctionSolver(_TestSolver): + + def get_solver_class(self): + return ImplicitFunctionSolver + + def test_bad_option(self): + msg = "Option.*is invalid" + with self.assertRaisesRegex(ValueError, msg): + self._test_implicit_function_1( + solver_options=dict(bad_option=None) + ) + + def test_implicit_function_1(self): + self._test_implicit_function_1() + + @unittest.skipUnless(cyipopt_available, 
"CyIpopt is not available") + def test_implicit_function_1_with_cyipopt(self): + self._test_implicit_function_1(solver_class=CyIpoptSolverWrapper) + + def test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function_inputs_dont_appear() + + def test_implicit_function_no_inputs(self): + self._test_implicit_function_no_inputs() + + def test_implicit_function_with_extra_variables(self): + self._test_implicit_function_with_extra_variables() + + +class TestSccImplicitFunctionSolver(_TestSolver): + + def get_solver_class(self): + return SccImplicitFunctionSolver + + def test_partition_not_implemented(self): + fcn = ImplicitFunction1() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + msg = "has not implemented" + with self.assertRaisesRegex(NotImplementedError, msg): + solver = DecomposedImplicitFunctionBase( + variables, equations, parameters + ) + + def test_n_subsystems(self): + SolverClass = self.get_solver_class() + fcn = ImplicitFunction1() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + solver = SolverClass(variables, equations, parameters) + + # Assert that the system decomposes into two subsystems. 
+ self.assertEqual(solver.n_subsystems(), 2) + + def test_implicit_function_1(self): + self._test_implicit_function_1() + + @unittest.skipUnless(cyipopt_available, "CyIpopt is not available") + def test_implicit_function_1_with_cyipopt(self): + self._test_implicit_function_1(solver_class=CyIpoptSolverWrapper) + + def test_implicit_function_1_no_calc_var(self): + self._test_implicit_function_1( + use_calc_var=False, + solver_options={"maxfev": 20}, + ) + + def test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function_inputs_dont_appear() + + def test_implicit_function_no_inputs(self): + self._test_implicit_function_no_inputs() + + def test_implicit_function_with_extra_variables(self): + self._test_implicit_function_with_extra_variables() + + +def _solve_with_ipopt(): + from pyomo.util.subsystems import TemporarySubsystemManager + ipopt = pyo.SolverFactory("ipopt") + fcn = ImplicitFunctionInputsDontAppear() + m = fcn._model + params = fcn.get_parameters() + variables = fcn.get_variables() + input_list = [] + output_list = [] + error_list = [] + for (p1, p2), _ in fcn.get_input_output_sequence(): + params[0].set_value(p1) + params[1].set_value(p2) + input_list.append((p1, p2)) + with TemporarySubsystemManager(to_fix=params): + try: + res = ipopt.solve(m, tee=True) + pyo.assert_optimal_termination(res) + error_list.append(False) + except (ValueError, AssertionError, RuntimeError): + error_list.append(True) + output_list.append(tuple(var.value for var in variables)) + for i, (inputs, outputs) in enumerate(zip(input_list, output_list)): + print(inputs, outputs, error_list[i]) + for outputs in output_list: + print("(%1.6f, %1.6f, %1.6f)," % outputs) + + +if __name__ == "__main__": + #unittest.main() + _solve_with_ipopt() diff --git a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py index de1541e58b7..d14afe503c9 100644 --- a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py 
+++ b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py @@ -10,30 +10,20 @@ # ___________________________________________________________________________ import itertools -from pyomo.environ import SolverFactory from pyomo.core.base.var import Var from pyomo.core.base.constraint import Constraint from pyomo.core.base.objective import Objective from pyomo.core.expr.visitor import identify_variables -from pyomo.common.collections import ComponentSet -from pyomo.core.base.suffix import Suffix -from pyomo.util.calc_var_value import calculate_variable_from_constraint +from pyomo.common.timing import HierarchicalTimer from pyomo.util.subsystems import ( create_subsystem_block, - TemporarySubsystemManager, ) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP from pyomo.contrib.pynumero.interfaces.external_grey_box import ( ExternalGreyBoxModel, ) -from pyomo.contrib.pynumero.interfaces.nlp_projections import ProjectedNLP -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, - CyIpoptNLP, - CyIpoptSolver, -) -from pyomo.contrib.incidence_analysis.util import ( - generate_strongly_connected_components, +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + SccImplicitFunctionSolver, ) import numpy as np import scipy.sparse as sps @@ -143,14 +133,16 @@ class ExternalPyomoModel(ExternalGreyBoxModel): """ - def __init__(self, - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=None, - solver=None, - ): + def __init__( + self, + input_vars, + external_vars, + residual_cons, + external_cons, + solver_class=None, + solver_options=None, + timer=None, + ): """ Arguments: ---------- @@ -164,94 +156,46 @@ def __init__(self, external_cons: list List of equality constraints used to solve for the external variables - use_cyipopt: bool - Whether to use CyIpopt to solve strongly connected components of - the implicit function that have dimension greater than one. 
- solver: Pyomo solver object - Used to solve strongly connected components of the implicit function - that have dimension greater than one. Only used if use_cyipopt - is False. + solver_class: Subclass of ImplicitFunctionSolver + The solver object that is used to converge the system of + equations defining the implicit function. + solver_options: dict + Options dict for the ImplicitFunctionSolver + timer: HierarchicalTimer + HierarchicalTimer object to which new timing categories introduced + will be attached. If None, a new timer will be created. """ - if use_cyipopt is None: - use_cyipopt = cyipopt_available - if use_cyipopt and not cyipopt_available: - raise RuntimeError( - "Constructing an ExternalPyomoModel with CyIpopt unavailable. " - "Please set the use_cyipopt argument to False." - ) - if solver is not None and use_cyipopt: - raise RuntimeError( - "Constructing an ExternalPyomoModel with a solver specified " - "and use_cyipopt set to True. Please set use_cyipopt to False " - "to use the desired solver." - ) - elif solver is None and not use_cyipopt: - solver = SolverFactory("ipopt") - # If use_cyipopt is True, this solver is None and will not be used. - self._solver = solver - self._use_cyipopt = use_cyipopt + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + if solver_class is None: + solver_class = SccImplicitFunctionSolver + self._solver_class = solver_class + if solver_options is None: + solver_options = {} + + self._timer.start("__init__") # We only need this block to construct the NLP, which wouldn't # be necessary if we could compute Hessians of Pyomo constraints. 
self._block = create_subsystem_block( - residual_cons+external_cons, - input_vars+external_vars, - ) + residual_cons+external_cons, + input_vars+external_vars, + ) self._block._obj = Objective(expr=0.0) + self._timer.start("PyomoNLP") self._nlp = PyomoNLP(self._block) + self._timer.stop("PyomoNLP") - self._scc_list = list(generate_strongly_connected_components( - external_cons, variables=external_vars - )) - - if use_cyipopt: - # Using CyIpopt allows us to solve inner problems without - # costly rewriting of the nl file. It requires quite a bit - # of preprocessing, however, to construct the ProjectedNLP - # for each block of the decomposition. - - # Get "vector-valued" SCCs, those of dimension > 0. - # We will solve these with a direct IPOPT interface, which requires - # some preprocessing. - self._vector_scc_list = [ - (scc, inputs) for scc, inputs in self._scc_list - if len(scc.vars) > 1 - ] - - # Need a dummy objective to create an NLP - for scc, inputs in self._vector_scc_list: - scc._obj = Objective(expr=0.0) - - # I need scaling_factor so Pyomo NLPs I create from these blocks - # don't break when ProjectedNLP calls get_primals_scaling - scc.scaling_factor = Suffix(direction=Suffix.EXPORT) - # HACK: scaling_factor just needs to be nonempty. 
- scc.scaling_factor[scc._obj] = 1.0 - - # These are the "original NLPs" that will be projected - self._vector_scc_nlps = [ - PyomoNLP(scc) for scc, inputs in self._vector_scc_list - ] - self._vector_scc_var_names = [ - [var.name for var in scc.vars.values()] - for scc, inputs in self._vector_scc_list - ] - self._vector_proj_nlps = [ - ProjectedNLP(nlp, names) for nlp, names in - zip(self._vector_scc_nlps, self._vector_scc_var_names) - ] - - # We will solve the ProjectedNLPs rather than the original NLPs - self._cyipopt_nlps = [CyIpoptNLP(nlp) for nlp in self._vector_proj_nlps] - self._cyipopt_solvers = [ - CyIpoptSolver(nlp) for nlp in self._cyipopt_nlps - ] - self._vector_scc_input_coords = [ - nlp.get_primal_indices(inputs) - for nlp, (scc, inputs) in - zip(self._vector_scc_nlps, self._vector_scc_list) - ] + # Instantiate a solver with the ImplicitFunctionSolver API: + self._solver = self._solver_class( + external_vars, + external_cons, + input_vars, + timer=self._timer, + **solver_options, + ) assert len(external_vars) == len(external_cons) @@ -263,6 +207,12 @@ def __init__(self, self.residual_con_multipliers = [None for _ in residual_cons] self.residual_scaling_factors = None + self._input_output_coords = self._nlp.get_primal_indices( + input_vars + external_vars + ) + + self._timer.stop("__init__") + def n_inputs(self): return len(self.input_vars) @@ -276,74 +226,27 @@ def equality_constraint_names(self): return ["residual_%i" % i for i in range(self.n_equality_constraints())] def set_input_values(self, input_values): + self._timer.start("set_inputs") + solver = self._solver external_cons = self.external_cons external_vars = self.external_vars input_vars = self.input_vars - for var, val in zip(input_vars, input_values): - var.set_value(val, skip_validation=True) - - vector_scc_idx = 0 - for block, inputs in self._scc_list: - if len(block.vars) == 1: - calculate_variable_from_constraint( - block.vars[0], block.cons[0] - ) - else: - if self._use_cyipopt: - # 
Transfer variable values into the projected NLP, solve, - # and extract values. - - nlp = self._vector_scc_nlps[vector_scc_idx] - proj_nlp = self._vector_proj_nlps[vector_scc_idx] - input_coords = self._vector_scc_input_coords[vector_scc_idx] - cyipopt = self._cyipopt_solvers[vector_scc_idx] - _, local_inputs = self._vector_scc_list[vector_scc_idx] - - primals = nlp.get_primals() - variables = nlp.get_pyomo_variables() - - # Set values and bounds from inputs to the SCC. - # This works because values have been set in the original - # pyomo model, either by a previous SCC solve, or from the - # "global inputs" - for i, var in zip(input_coords, local_inputs): - # Set primals (inputs) in the original NLP - primals[i] = var.value - # This affects future evaluations in the ProjectedNLP - nlp.set_primals(primals) - x0 = proj_nlp.get_primals() - sol, _ = cyipopt.solve(x0=x0) - - # Set primals from solution in projected NLP. This updates - # values in the original NLP - proj_nlp.set_primals(sol) - # I really only need to set new primals for the variables in - # the ProjectedNLP. However, I can only get a list of variables - # from the original Pyomo NLP, so here some of the values I'm - # setting are redundant. - new_primals = nlp.get_primals() - assert len(new_primals) == len(variables) - for var, val in zip(variables, new_primals): - var.set_value(val, skip_validation=True) - - else: - # Use a Pyomo solver to solve this strongly connected - # component. 
- with TemporarySubsystemManager(to_fix=inputs): - solver.solve(block) - - vector_scc_idx += 1 + solver.set_parameters(input_values) + outputs = solver.evaluate_outputs() + solver.update_pyomo_model() + # # Send updated variable values to NLP for dervative evaluation + # primals = self._nlp.get_primals() - to_update = input_vars + external_vars - indices = self._nlp.get_primal_indices(to_update) - values = np.fromiter((var.value for var in to_update), float) - primals[indices] = values + values = np.concatenate((input_values, outputs)) + primals[self._input_output_coords] = values self._nlp.set_primals(primals) + self._timer.stop("set_inputs") + def set_equality_constraint_multipliers(self, eq_con_multipliers): """ Sets multipliers for residual equality constraints seen by the @@ -435,6 +338,8 @@ def evaluate_equality_constraints(self): return self._nlp.extract_subvector_constraints(self.residual_cons) def evaluate_jacobian_equality_constraints(self): + self._timer.start("jacobian") + nlp = self._nlp x = self.input_vars y = self.external_vars @@ -459,7 +364,10 @@ def evaluate_jacobian_equality_constraints(self): # be nonzero. Here, this is all of the entries. dfdx = jfx + jfy.dot(dydx) - return _dense_to_full_sparse(dfdx) + full_sparse = _dense_to_full_sparse(dfdx) + + self._timer.stop("jacobian") + return full_sparse def evaluate_jacobian_external_variables(self): nlp = self._nlp @@ -570,6 +478,8 @@ def evaluate_hessian_equality_constraints(self): due to these equality constraints. """ + self._timer.start("hessian") + # External multipliers must be calculated after both primals and duals # are set, and are only necessary for this Hessian calculation. # We know this Hessian calculation wants to use the most recently @@ -585,7 +495,9 @@ def evaluate_hessian_equality_constraints(self): # Hessian-of-Lagrangian term in the full space. 
hess_lag = self.calculate_reduced_hessian_lagrangian(hlxx, hlxy, hlyy) sparse = _dense_to_full_sparse(hess_lag) - return sps.tril(sparse) + lower_triangle = sps.tril(sparse) + self._timer.stop("hessian") + return lower_triangle def set_equality_constraint_scaling_factors(self, scaling_factors): """ diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py index 358821bc180..50f4361490a 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py @@ -20,7 +20,7 @@ raise unittest.SkipTest( "Pynumero needs the ASL extension to run NLP tests") from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.environ import ConcreteModel, ExternalFunction, Var, Objective class TestAMPLExternalFunction(unittest.TestCase): diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py index 2367cbb1d29..9d908d0084b 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py @@ -16,7 +16,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) if not (numpy_available and scipy_available): @@ -25,14 +28,19 @@ from pyomo.common.dependencies.scipy import sparse as sps from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest( - "Pynumero needs the ASL extension to run cyipopt tests") + "Pynumero needs the ASL extension to run cyipopt tests" + ) from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( 
cyipopt_available, ) -import pyomo.contrib.pynumero.interfaces.external_pyomo_model as epm_module +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + CyIpoptSolverWrapper, + ImplicitFunctionSolver, +) from pyomo.contrib.pynumero.interfaces.external_pyomo_model import ( ExternalPyomoModel, get_hessian_of_constraint, @@ -50,9 +58,7 @@ ) if not pyo.SolverFactory("ipopt").available(): - raise unittest.SkipTest( - "Need IPOPT to run ExternalPyomoModel tests" - ) + raise unittest.SkipTest("Need IPOPT to run ExternalPyomoModel tests") def _make_external_model(): @@ -66,12 +72,12 @@ def _make_external_model(): m.y_out = pyo.Var() m.c_out_1 = pyo.Constraint(expr=m.x_out - m.x == 0) m.c_out_2 = pyo.Constraint(expr=m.y_out - m.y == 0) - m.c_ex_1 = pyo.Constraint(expr= - m.x**3 - 2*m.y == m.a**2 + m.b**3 - m.r**3 - 2 - ) - m.c_ex_2 = pyo.Constraint(expr= - m.x + m.y**3 == m.a**3 + 2*m.b**2 + m.r**2 + 1 - ) + m.c_ex_1 = pyo.Constraint( + expr=m.x**3 - 2 * m.y == m.a**2 + m.b**3 - m.r**3 - 2 + ) + m.c_ex_2 = pyo.Constraint( + expr=m.x + m.y**3 == m.a**3 + 2 * m.b**2 + m.r**2 + 1 + ) return m @@ -90,8 +96,9 @@ def linking_constraint_rule(m, i): elif i == 2: return m.r == m.ex_block.inputs["input_2"] - m.linking_constraint = pyo.Constraint(range(n_inputs), - rule=linking_constraint_rule) + m.linking_constraint = pyo.Constraint( + range(n_inputs), rule=linking_constraint_rule + ) def _add_nonlinear_linking_constraints(m): @@ -106,14 +113,15 @@ def _add_nonlinear_linking_constraints(m): def linking_constraint_rule(m, i): if i == 0: - return m.a**2 - 0.5*m.ex_block.inputs["input_0"]**2 == 0 + return m.a**2 - 0.5 * m.ex_block.inputs["input_0"] ** 2 == 0 elif i == 1: - return m.b**2 - 0.5*m.ex_block.inputs["input_1"]**2 == 0 + return m.b**2 - 0.5 * m.ex_block.inputs["input_1"] ** 2 == 0 elif i == 2: - return m.r**2 - 0.5*m.ex_block.inputs["input_2"]**2 == 0 + return m.r**2 - 0.5 * m.ex_block.inputs["input_2"] ** 2 == 0 - m.linking_constraint = 
pyo.Constraint(range(n_inputs), - rule=linking_constraint_rule) + m.linking_constraint = pyo.Constraint( + range(n_inputs), rule=linking_constraint_rule + ) def make_dynamic_model(): @@ -121,7 +129,7 @@ def make_dynamic_model(): m.time = pyo.Set(initialize=[0, 1, 2]) m = pyo.ConcreteModel() - m.time = pyo.Set(initialize=[0, 1, 2]) + m.time = pyo.Set(initialize=[0, 1, 2]) t0 = m.time.first() m.h = pyo.Var(m.time, initialize=1.0) @@ -133,6 +141,7 @@ def make_dynamic_model(): def h_diff_eqn_rule(m, t): return m.dhdt[t] - (m.flow_in[t] - m.flow_out[t]) == 0 + m.h_diff_eqn = pyo.Constraint(m.time, rule=h_diff_eqn_rule) def dhdt_disc_eqn_rule(m, t): @@ -140,19 +149,20 @@ def dhdt_disc_eqn_rule(m, t): return pyo.Constraint.Skip else: t_prev = m.time.prev(t) - delta_t = (t - t_prev) - return m.dhdt[t] - delta_t*(m.h[t] - m.h[t_prev]) == 0 + delta_t = t - t_prev + return m.dhdt[t] - delta_t * (m.h[t] - m.h[t_prev]) == 0 + m.dhdt_disc_eqn = pyo.Constraint(m.time, rule=dhdt_disc_eqn_rule) def flow_out_eqn(m, t): - return m.flow_out[t] == m.flow_coef*m.h[t]**0.5 + return m.flow_out[t] == m.flow_coef * m.h[t] ** 0.5 + m.flow_out_eqn = pyo.Constraint(m.time, rule=flow_out_eqn) return m class TestExternalGreyBoxBlock(unittest.TestCase): - def test_construct_scalar(self): m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) @@ -165,11 +175,11 @@ def test_construct_scalar(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) self.assertEqual(len(block.inputs), len(input_vars)) @@ -186,11 +196,11 @@ def test_construct_indexed(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + 
input_vars, + external_vars, + residual_cons, + external_cons, + ) for i in block: b = block[i] @@ -211,11 +221,11 @@ def test_solve_square(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) _add_linking_constraints(m) @@ -256,11 +266,11 @@ def test_optimize(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -268,18 +278,25 @@ def test_optimize(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -299,8 +316,9 @@ def test_optimize(self): self.assertAlmostEqual(m_ex.y.value, y.value, delta=1e-8) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_optimize_with_ipopt_for_inner_problem(self): - # Use Ipopt, rather than the default CyIpopt, for the inner problem + def 
test_optimize_with_cyipopt_for_inner_problem(self): + # Use CyIpopt, rather than the default SciPy solvers, + # for the inner problem m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) block = m.ex_block @@ -310,15 +328,19 @@ def test_optimize_with_ipopt_for_inner_problem(self): external_vars = [m_ex.x, m_ex.y] residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") + + # This passes options to the internal ImplicitFunctionSolver, + # which by default is SccImplicitFunctionSolver. + # This option tells it what solver to use subsystems in its + # decomposition. + solver_options = dict(solver_class=CyIpoptSolverWrapper) ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - solver=inner_solver, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + solver_options=solver_options, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -326,18 +348,25 @@ def test_optimize_with_ipopt_for_inner_problem(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -357,11 +386,11 @@ def test_optimize_with_ipopt_for_inner_problem(self): self.assertAlmostEqual(m_ex.y.value, 
y.value, delta=1e-8) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_optimize_no_cyipopt_for_inner_problem(self): - # Here we don't specify a solver for the inner problem. - # This is contrived, as clearly CyIpopt is available for the outer - # solve. This test exercises the part of the ExternalPyomoModel - # constructor that sets a default solver if CyIpopt is not available. + def test_optimize_no_decomposition(self): + # This is a test that does not use the SCC decomposition + # to converge the implicit function. We do this by passing + # solver_class=ImplicitFunctionSolver rather than the default, + # SccImplicitFunctionSolver m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) block = m.ex_block @@ -372,11 +401,12 @@ def test_optimize_no_cyipopt_for_inner_problem(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + solver_class=ImplicitFunctionSolver, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -384,18 +414,25 @@ def test_optimize_no_cyipopt_for_inner_problem(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) 
m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -425,20 +462,20 @@ def test_construct_dynamic(self): ext_cons = [m.flow_out_eqn] external_model_dict = { - t: ExternalPyomoModel( - [var[t] for var in inputs], - [var[t] for var in ext_vars], - [con[t] for con in residuals], - [con[t] for con in ext_cons], - ) - for t in time - } + t: ExternalPyomoModel( + [var[t] for var in inputs], + [var[t] for var in ext_vars], + [con[t] for con in residuals], + [con[t] for con in ext_cons], + ) + for t in time + } reduced_space = pyo.Block(concrete=True) reduced_space.external_block = ExternalGreyBoxBlock( - time, - external_model=external_model_dict, - ) + time, + external_model=external_model_dict, + ) block = reduced_space.external_block block[t0].deactivate() self.assertIs(type(block), IndexedExternalGreyBoxBlock) @@ -457,10 +494,10 @@ def test_construct_dynamic(self): pyomo_vars = list(reduced_space.component_data_objects(pyo.Var)) pyomo_cons = list(reduced_space.component_data_objects(pyo.Constraint)) # NOTE: Variables in the EGBB are not found by component_data_objects - self.assertEqual(len(pyomo_vars), len(inputs)*len(time)) + self.assertEqual(len(pyomo_vars), len(inputs) * len(time)) # "Constraints" defined by the EGBB are not found either, although # this is expected. - self.assertEqual(len(pyomo_cons), len(time)-1) + self.assertEqual(len(pyomo_cons), len(time) - 1) reduced_space._obj = pyo.Objective(expr=0) @@ -472,24 +509,24 @@ def test_construct_dynamic(self): # This is necessary for these variables to appear in the PNLPwGBB. # Otherwise they don't appear in any "real" constraints of the - # PyomoNLP. + # PyomoNLP. reduced_space.const_input_eqn = pyo.Constraint( expr=reduced_space.input_var[2] - reduced_space.input_var[1] == 0 - ) + ) nlp = PyomoNLPWithGreyBoxBlocks(reduced_space) self.assertEqual( - nlp.n_primals(), - # EGBB "inputs", dhdt, and flow_in exist for t != t0. - # h exists for all time. 
- (2+len(inputs))*(len(time)-1) + len(time), - ) + nlp.n_primals(), + # EGBB "inputs", dhdt, and flow_in exist for t != t0. + # h exists for all time. + (2 + len(inputs)) * (len(time) - 1) + len(time), + ) self.assertEqual( - nlp.n_constraints(), - # EGBB equality constraints and disc_eqn exist for t != t0. - # const_input_eqn is a single constraint - (len(residuals)+1)*(len(time)-1) + 1, - ) + nlp.n_constraints(), + # EGBB equality constraints and disc_eqn exist for t != t0. + # const_input_eqn is a single constraint + (len(residuals) + 1) * (len(time) - 1) + 1, + ) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") def test_solve_square_dynamic(self): @@ -518,14 +555,15 @@ def test_solve_square_dynamic(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block[t].set_external_model(external_model) n_inputs = len(input_vars) + def linking_constraint_rule(m, i, t): if t == t0: return pyo.Constraint.Skip @@ -535,10 +573,10 @@ def linking_constraint_rule(m, i, t): return m.deriv_var[t] == m.external_block[t].inputs["input_1"] reduced_space.linking_constraint = pyo.Constraint( - range(n_inputs), - time, - rule=linking_constraint_rule, - ) + range(n_inputs), + time, + rule=linking_constraint_rule, + ) # Initialize new variables for t in time: if t != t0: @@ -571,9 +609,9 @@ def test_optimize_dynamic(self): m.h[t0].fix(1.2) m.flow_in[t0].fix(1.5) - m.obj = pyo.Objective(expr=sum( - (m.h[t] - 2.0)**2 for t in m.time if t != t0 - )) + m.obj = pyo.Objective( + expr=sum((m.h[t] - 2.0) ** 2 for t in m.time if t != t0) + ) # Create the block that will hold the reduced space model. 
reduced_space = pyo.Block(concrete=True) @@ -594,14 +632,15 @@ def test_optimize_dynamic(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block[t].set_external_model(external_model) n_inputs = len(input_vars) + def linking_constraint_rule(m, i, t): if t == t0: return pyo.Constraint.Skip @@ -613,10 +652,10 @@ def linking_constraint_rule(m, i, t): return m.input_var[t] == m.external_block[t].inputs["input_2"] reduced_space.linking_constraint = pyo.Constraint( - range(n_inputs), - time, - rule=linking_constraint_rule, - ) + range(n_inputs), + time, + rule=linking_constraint_rule, + ) # Initialize new variables for t in time: if t != t0: @@ -636,10 +675,18 @@ def linking_constraint_rule(m, i, t): for t in time: if t == t0: continue - values = [m.h[t].value, m.dhdt[t].value, - m.flow_out[t].value, m.flow_in[t].value] - target_values = [h_target[t], dhdt_target[t], - flow_out_target[t], flow_in_target[t]] + values = [ + m.h[t].value, + m.dhdt[t].value, + m.flow_out[t].value, + m.flow_in[t].value, + ] + target_values = [ + h_target[t], + dhdt_target[t], + flow_out_target[t], + flow_in_target[t], + ] self.assertStructuredAlmostEqual(values, target_values, delta=1e-5) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") @@ -655,9 +702,9 @@ def test_optimize_dynamic_references(self): m.h[t0].fix(1.2) m.flow_in[t0].fix(1.5) - m.obj = pyo.Objective(expr=sum( - (m.h[t] - 2.0)**2 for t in m.time if t != t0 - )) + m.obj = pyo.Objective( + expr=sum((m.h[t] - 2.0) ** 2 for t in m.time if t != t0) + ) # Create the block that will hold the reduced space model. 
reduced_space = pyo.Block(concrete=True) @@ -678,11 +725,11 @@ def test_optimize_dynamic_references(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block[t].set_external_model(external_model, inputs=input_vars) solver = pyo.SolverFactory("cyipopt") @@ -697,15 +744,22 @@ def test_optimize_dynamic_references(self): for t in time: if t == t0: continue - values = [m.h[t].value, m.dhdt[t].value, - m.flow_out[t].value, m.flow_in[t].value] - target_values = [h_target[t], dhdt_target[t], - flow_out_target[t], flow_in_target[t]] + values = [ + m.h[t].value, + m.dhdt[t].value, + m.flow_out[t].value, + m.flow_in[t].value, + ] + target_values = [ + h_target[t], + dhdt_target[t], + flow_out_target[t], + flow_in_target[t], + ] self.assertStructuredAlmostEqual(values, target_values, delta=1e-5) class TestPyomoNLPWithGreyBoxBLocks(unittest.TestCase): - def test_set_and_evaluate(self): m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) @@ -717,12 +771,11 @@ def test_set_and_evaluate(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -730,9 +783,13 @@ def test_set_and_evaluate(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_linking_constraints(m) @@ -747,15 +804,15 @@ def 
test_set_and_evaluate(self): # PyomoNLPWithGreyBoxBlocks sorts variables by name primals_names = [ - "a", - "b", - "ex_block.inputs[input_0]", - "ex_block.inputs[input_1]", - "ex_block.inputs[input_2]", - "ex_block.inputs[input_3]", - "ex_block.inputs[input_4]", - "r", - ] + "a", + "b", + "ex_block.inputs[input_0]", + "ex_block.inputs[input_1]", + "ex_block.inputs[input_2]", + "ex_block.inputs[input_3]", + "ex_block.inputs[input_4]", + "r", + ] self.assertEqual(nlp.primals_names(), primals_names) np.testing.assert_equal(np.zeros(8), nlp.get_primals()) @@ -769,25 +826,28 @@ def test_set_and_evaluate(self): self.assertEqual(var.value, val) constraint_names = [ - "linking_constraint[0]", - "linking_constraint[1]", - "linking_constraint[2]", - "ex_block.residual_0", - "ex_block.residual_1", - ] + "linking_constraint[0]", + "linking_constraint[1]", + "linking_constraint[2]", + "ex_block.residual_0", + "ex_block.residual_1", + ] self.assertEqual(constraint_names, nlp.constraint_names()) - residuals = np.array([ + residuals = np.array( + [ -2.0, -2.0, 3.0, # These values were obtained by solving the same system # with Ipopt in another script. It may be better to do # the solve in this test in case the system changes. 
- 5.0-(-3.03051522), - 6.0-3.583839997, - ]) - np.testing.assert_allclose(residuals, nlp.evaluate_constraints(), - rtol=1e-8) + 5.0 - (-3.03051522), + 6.0 - 3.583839997, + ] + ) + np.testing.assert_allclose( + residuals, nlp.evaluate_constraints(), rtol=1e-8 + ) duals = np.array([1, 2, 3, 4, 5]) nlp.set_duals(duals) @@ -806,12 +866,11 @@ def test_jacobian(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -819,9 +878,13 @@ def test_jacobian(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_linking_constraints(m) @@ -851,26 +914,59 @@ def test_jacobian(self): # "r", # ] row = [ - 0, 0, - 1, 1, - 2, 2, - 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, - ] + 0, + 0, + 1, + 1, + 2, + 2, + 3, + 3, + 3, + 3, + 3, + 4, + 4, + 4, + 4, + 4, + ] col = [ - 0, 2, - 1, 3, - 7, 4, - 2, 3, 4, 5, 6, - 2, 3, 4, 5, 6, - ] + 0, + 2, + 1, + 3, + 7, + 4, + 2, + 3, + 4, + 5, + 6, + 2, + 3, + 4, + 5, + 6, + ] data = [ - 1, -1, - 1, -1, - 1, -1, - -0.16747094, -1.00068434, 1.72383729, 1, 0, - -0.30708535, -0.28546127, -0.25235924, 0, 1, - ] + 1, + -1, + 1, + -1, + 1, + -1, + -0.16747094, + -1.00068434, + 1.72383729, + 1, + 0, + -0.30708535, + -0.28546127, + -0.25235924, + 0, + 1, + ] self.assertEqual(len(row), len(jac.row)) rcd_dict = dict(((i, j), val) for i, j, val in zip(row, col, data)) for i, j, val in zip(jac.row, jac.col, jac.data): @@ -889,12 +985,11 @@ def test_hessian_1(self): residual_cons = [m_ex.c_out_1, 
m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -902,9 +997,13 @@ def test_hessian_1(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_nonlinear_linking_constraints(m) @@ -941,18 +1040,18 @@ def test_hessian_1(self): # while writing this test, which is just meant to verify # that the different Hessians combined properly. ex_block_nonzeros = { - (2, 2): 2.0 + (-1.0) + (-0.10967928) + (-0.25595929), - (2, 3): (-0.10684633) + (0.05169308), - (3, 2): (-0.10684633) + (0.05169308), - (2, 4): (0.19329898) + (0.03823075), - (4, 2): (0.19329898) + (0.03823075), - (3, 3): 2.0 + (-1.0) + (-1.31592135) + (-0.0241836), - (3, 4): (1.13920361) + (0.01063667), - (4, 3): (1.13920361) + (0.01063667), - (4, 4): 2.0 + (-1.0) + (-1.0891866) + (0.01190218), - (5, 5): 2.0, - (6, 6): 2.0, - } + (2, 2): 2.0 + (-1.0) + (-0.10967928) + (-0.25595929), + (2, 3): (-0.10684633) + (0.05169308), + (3, 2): (-0.10684633) + (0.05169308), + (2, 4): (0.19329898) + (0.03823075), + (4, 2): (0.19329898) + (0.03823075), + (3, 3): 2.0 + (-1.0) + (-1.31592135) + (-0.0241836), + (3, 4): (1.13920361) + (0.01063667), + (4, 3): (1.13920361) + (0.01063667), + (4, 4): 2.0 + (-1.0) + (-1.0891866) + (0.01190218), + (5, 5): 2.0, + (6, 6): 2.0, + } rcd_dict.update(ex_block_nonzeros) # Because "external Hessians" are computed by factorizing matrices, @@ -981,12 +1080,11 @@ def test_hessian_2(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, 
m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -994,9 +1092,13 @@ def test_hessian_2(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_nonlinear_linking_constraints(m) @@ -1021,7 +1123,7 @@ def test_hessian_2(self): row = [0, 1, 7] col = [0, 1, 7] # Data entries are influenced by multiplier values. - data = [4.4*2.0, -3.3*2.0, 2.2*2.0] + data = [4.4 * 2.0, -3.3 * 2.0, 2.2 * 2.0] # ^ These variables only appear in linking constraints rcd_dict = dict(((i, j), val) for i, j, val in zip(row, col, data)) @@ -1032,18 +1134,18 @@ def test_hessian_2(self): # while writing this test, which is just meant to verify # that the different Hessians combined properly. 
ex_block_nonzeros = { - (2, 2): 2.0 + 4.4*(-1.0) + -1.1*(-0.10967928), - (2, 3): -1.1*(-0.10684633), - (3, 2): -1.1*(-0.10684633), - (2, 4): -1.1*(0.19329898), - (4, 2): -1.1*(0.19329898), - (3, 3): 2.0 + (-3.3)*(-1.0) + -1.1*(-1.31592135), - (3, 4): -1.1*(1.13920361), - (4, 3): -1.1*(1.13920361), - (4, 4): 2.0 + 2.2*(-1.0) + -1.1*(-1.0891866), - (5, 5): 2.0, - (6, 6): 2.0, - } + (2, 2): 2.0 + 4.4 * (-1.0) + -1.1 * (-0.10967928), + (2, 3): -1.1 * (-0.10684633), + (3, 2): -1.1 * (-0.10684633), + (2, 4): -1.1 * (0.19329898), + (4, 2): -1.1 * (0.19329898), + (3, 3): 2.0 + (-3.3) * (-1.0) + -1.1 * (-1.31592135), + (3, 4): -1.1 * (1.13920361), + (4, 3): -1.1 * (1.13920361), + (4, 4): 2.0 + 2.2 * (-1.0) + -1.1 * (-1.0891866), + (5, 5): 2.0, + (6, 6): 2.0, + } rcd_dict.update(ex_block_nonzeros) # Because "external Hessians" are computed by factorizing matrices, @@ -1086,7 +1188,7 @@ def _create_pressure_drop_model(self): m.c_con = pyo.Constraint(expr=m.c == 1.0) m.F_con = pyo.Constraint(expr=m.F == 10.0) m.P2_con = pyo.Constraint(expr=m.P2 <= 5.0) - m.obj = pyo.Objective(expr=(m.Pout - 3.0)**2) + m.obj = pyo.Objective(expr=(m.Pout - 3.0) ** 2) cons = [m.c_con, m.F_con, m.Pin_con, m.P2_con] inputs = [m.Pin, m.c, m.F] @@ -1096,10 +1198,10 @@ def _create_pressure_drop_model(self): ex_model = PressureDropTwoOutputsWithHessian() m.egb = ExternalGreyBoxBlock() m.egb.set_external_model( - ex_model, - inputs=inputs, - outputs=outputs, - ) + ex_model, + inputs=inputs, + outputs=outputs, + ) return m def test_pressure_drop_model(self): @@ -1114,7 +1216,7 @@ def test_pressure_drop_model(self): # The references to inputs and outputs are not picked up twice, # as EGBB does not have ctype Block - self.assertEqual(len(pyomo_variables), len(inputs)+len(outputs)) + self.assertEqual(len(pyomo_variables), len(inputs) + len(outputs)) self.assertEqual(len(pyomo_constraints), len(cons)) # Test the inputs and outputs attributes on egb @@ -1137,20 +1239,20 @@ def 
test_pressure_drop_model_nlp(self): outputs = [m.P2, m.Pout] nlp = PyomoNLPWithGreyBoxBlocks(m) - + n_primals = len(inputs) + len(outputs) n_eq_con = len(cons) + len(outputs) self.assertEqual(nlp.n_primals(), n_primals) self.assertEqual(nlp.n_constraints(), n_eq_con) constraint_names = [ - "c_con", - "F_con", - "Pin_con", - "P2_con", - "egb.output_constraints[P2]", - "egb.output_constraints[Pout]", - ] + "c_con", + "F_con", + "Pin_con", + "P2_con", + "egb.output_constraints[P2]", + "egb.output_constraints[Pout]", + ] primals = inputs + outputs nlp_constraints = nlp.constraint_names() nlp_vars = nlp.primals_names() @@ -1166,8 +1268,9 @@ def test_pressure_drop_model_nlp(self): name = var.name var_idx_map[var] = nlp_vars.index(name) - incident_vars = {con.name: list(identify_variables(con.expr)) - for con in cons} + incident_vars = { + con.name: list(identify_variables(con.expr)) for con in cons + } incident_vars["egb.output_constraints[P2]"] = inputs + [outputs[0]] incident_vars["egb.output_constraints[Pout]"] = inputs + [outputs[1]] @@ -1185,62 +1288,5 @@ def test_pressure_drop_model_nlp(self): self.assertIn((i, j), expected_nonzeros) -class TestExceptions(unittest.TestCase): - - @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_solver_with_cyipopt(self): - # CyIpopt is required here just because we get a different error - # (see test below) if CyIpopt is unavailable. 
- m = pyo.ConcreteModel() - m.ex_block = ExternalGreyBoxBlock(concrete=True) - block = m.ex_block - - m_ex = _make_external_model() - input_vars = [m_ex.a, m_ex.b, m_ex.r, m_ex.x_out, m_ex.y_out] - external_vars = [m_ex.x, m_ex.y] - residual_cons = [m_ex.c_out_1, m_ex.c_out_2] - external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") - msg = "Please set use_cyipopt to False" - with self.assertRaisesRegex(RuntimeError, msg): - ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - solver=inner_solver, - use_cyipopt=True, - ) - - def test_cyipopt_unavailable(self): - try: - # HACK: Make external_pyomo_model.py think that cyipopt is not - # available. - epm_module.cyipopt_available = False - - m = pyo.ConcreteModel() - m.ex_block = ExternalGreyBoxBlock(concrete=True) - block = m.ex_block - - m_ex = _make_external_model() - input_vars = [m_ex.a, m_ex.b, m_ex.r, m_ex.x_out, m_ex.y_out] - external_vars = [m_ex.x, m_ex.y] - residual_cons = [m_ex.c_out_1, m_ex.c_out_2] - external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") - msg = "Constructing an ExternalPyomoModel with CyIpopt unavailable" - with self.assertRaisesRegex(RuntimeError, msg): - ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=True, - ) - finally: - # HACK: Reset the global flag - epm_module.cyipopt_available = cyipopt_available - - -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py index da6467d85bf..5a423cc06cd 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py @@ -780,17 +780,19 @@ def test_jacobian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] 
x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) jac = external_model.evaluate_jacobian_equality_constraints() expected_jac = model.evaluate_jacobian(x) - np.testing.assert_allclose(jac.toarray(), expected_jac, rtol=1e-8) + np.testing.assert_allclose( + jac.toarray(), expected_jac, rtol=1e-8, atol=1e-8 + ) def test_hessian_SimpleModel2x2_1(self): model = SimpleModel2by2_1() @@ -1087,7 +1089,6 @@ def make_model(self): [m.x, m.y], [m.con_3, m.con_4], [m.con_1, m.con_2], - use_cyipopt=False, ) epm_model.obj = pyo.Objective( expr=m.x**2 + m.y**2 + m.u**2 + m.v**2 diff --git a/pyomo/core/tests/unit/test_compare.py b/pyomo/core/tests/unit/test_compare.py index 8811b2b19f3..31dac7f5c2e 100644 --- a/pyomo/core/tests/unit/test_compare.py +++ b/pyomo/core/tests/unit/test_compare.py @@ -23,7 +23,7 @@ convert_expression_to_prefix_notation, compare_expressions, assertExpressionsEqual, assertExpressionsStructurallyEqual, ) -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL class TestConvertToPrefixNotation(unittest.TestCase): diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py index 74d117068fd..a700e3df688 100644 --- a/pyomo/core/tests/unit/test_derivs.py +++ b/pyomo/core/tests/unit/test_derivs.py @@ -11,8 +11,8 @@ import pyomo.common.unittest as unittest import pyomo.environ as pyo -from pyomo.common.getGSL import find_GSL -from pyomo.core.expr.calculus.derivatives import differentiate, Modes +from pyomo.common.gsl import find_GSL +from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.core.expr.calculus.diff_with_pyomo import ( reverse_ad, reverse_sd, 
DifferentiationException, ) diff --git a/pyomo/core/tests/unit/test_external.py b/pyomo/core/tests/unit/test_external.py index 1893a336ee8..b5238b68fe8 100644 --- a/pyomo/core/tests/unit/test_external.py +++ b/pyomo/core/tests/unit/test_external.py @@ -16,7 +16,7 @@ import pyomo.common.unittest as unittest -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager from pyomo.environ import ( diff --git a/pyomo/dataportal/factory.py b/pyomo/dataportal/factory.py index 2c35b450c51..41b8bf34525 100644 --- a/pyomo/dataportal/factory.py +++ b/pyomo/dataportal/factory.py @@ -16,7 +16,7 @@ import logging from pyomo.common import Factory -from pyomo.common.plugin import PluginError +from pyomo.common.plugin_base import PluginError logger = logging.getLogger('pyomo.core') diff --git a/pyomo/repn/tests/ampl/test_ampl_nl.py b/pyomo/repn/tests/ampl/test_ampl_nl.py index a2e067a192d..5e4877bacb9 100644 --- a/pyomo/repn/tests/ampl/test_ampl_nl.py +++ b/pyomo/repn/tests/ampl/test_ampl_nl.py @@ -15,7 +15,7 @@ import pyomo.common.unittest as unittest -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.common.fileutils import this_file_dir from pyomo.common.tempfiles import TempfileManager from pyomo.environ import ( diff --git a/pyomo/scripting/driver_help.py b/pyomo/scripting/driver_help.py index 5d321d40650..6cf64f741bc 100644 --- a/pyomo/scripting/driver_help.py +++ b/pyomo/scripting/driver_help.py @@ -109,7 +109,7 @@ def help_writers(): def help_checkers(): import pyomo.environ - import pyomo.common.plugin + import pyomo.common.plugin_base from pyomo.checker import IModelChecker wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' @@ -117,7 +117,7 @@ def help_checkers(): print("") print("Pyomo Model Checkers") print("--------------------") - ep = pyomo.common.plugin.ExtensionPoint(IModelChecker) + ep = 
pyomo.common.plugin_base.ExtensionPoint(IModelChecker) tmp = {} for checker in ep.extensions(): for alias in getattr(checker, '_factory_aliases', set()): diff --git a/pyomo/scripting/interface.py b/pyomo/scripting/interface.py index 5c01c3c95e0..c06cf313884 100644 --- a/pyomo/scripting/interface.py +++ b/pyomo/scripting/interface.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.plugin import ( +from pyomo.common.plugin_base import ( Interface, DeprecatedInterface, Plugin, SingletonPlugin, ExtensionPoint, implements, alias ) diff --git a/pyomo/scripting/plugins/download.py b/pyomo/scripting/plugins/download.py index 16f414db982..610937cb4b4 100644 --- a/pyomo/scripting/plugins/download.py +++ b/pyomo/scripting/plugins/download.py @@ -36,6 +36,9 @@ def _call_impl(self, args, unparsed, logger): returncode = 0 self.downloader.cacert = args.cacert self.downloader.insecure = args.insecure + logger.info("As of February 9, 2023, AMPL GSL can no longer be downloaded\ + through download-extensions. Visit https://portal.ampl.com/\ + to download the AMPL GSL binaries.") for target in DownloadFactory: try: ext = DownloadFactory(target, downloader=self.downloader) diff --git a/pyomo/util/tests/test_subsystems.py b/pyomo/util/tests/test_subsystems.py index ee1851b6c7d..9430b3b80a8 100644 --- a/pyomo/util/tests/test_subsystems.py +++ b/pyomo/util/tests/test_subsystems.py @@ -20,7 +20,7 @@ identify_external_functions, add_local_external_functions, ) -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL def _make_simple_model(): diff --git a/pyomo/version/info.py b/pyomo/version/info.py index a106ee561ec..ac68273d1de 100644 --- a/pyomo/version/info.py +++ b/pyomo/version/info.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ -_init_url="$URL$" +_init_url = "$URL$" # NOTE: releaselevel should be left at 'invalid' for trunk development # and set to 'final' for releases. During development, the @@ -24,19 +24,20 @@ # {hash}" and set the serial number to YYMMDDhhmm. The serial number # should generally be left at 0, unless a downstream package is tracking # main and needs a hard reference to "suitably new" development. -major=6 -minor=4 -micro=5 -releaselevel='invalid' -#releaselevel='final' -serial=0 +major = 6 +minor = 4 +micro = 5 +releaselevel = 'invalid' +# releaselevel='final' +serial = 0 if releaselevel == 'final': pass -elif '/tags/' in _init_url: #pragma:nocover +elif '/tags/' in _init_url: # pragma:nocover releaselevel = 'final' elif releaselevel == 'invalid': from os.path import abspath, dirname, exists, join + if __file__.endswith('setup.py'): # This file is being sources (exec'ed) from setup.py. # dirname(__file__) setup.py's scope is the root sourec directory @@ -49,20 +50,22 @@ # __file__ fails if someone does os.chdir() before # sys.argv[0] also fails because it doesn't not always contains the path from inspect import getfile, currentframe + _rootdir = join(dirname(abspath(getfile(currentframe()))), '..', '..') if exists(join(_rootdir, '.git')): try: with open(join(_rootdir, '.git', 'HEAD')) as _FILE: - _ref = _FILE.readline().strip() #pragma:nocover + _ref = _FILE.readline().strip() # pragma:nocover releaselevel = 'devel {%s}' % ( - _ref.split('/')[-1].split('\\')[-1], ) #pragma:nocover + _ref.split('/')[-1].split('\\')[-1], + ) # pragma:nocover except: - releaselevel = 'devel' #pragma:nocover + releaselevel = 'devel' # pragma:nocover elif exists(join(_rootdir, '.svn')): - releaselevel = 'devel {svn}' #pragma:nocover + releaselevel = 'devel {svn}' # pragma:nocover else: - releaselevel = 'VOTD' #pragma:nocover + releaselevel = 'VOTD' # pragma:nocover version_info = (major, minor, micro, 
releaselevel, serial) @@ -70,7 +73,7 @@ version = '.'.join(str(x) for x in version_info[:3]) __version__ = version if releaselevel != 'final': - version += ' ('+releaselevel+')' + version += ' (' + releaselevel + ')' if releaselevel.startswith('devel'): __version__ += ".dev%d" % (serial,) elif releaselevel.startswith('VOTD'): diff --git a/pyomo/version/tests/check.py b/pyomo/version/tests/check.py index 7d20c133886..ab3b45ffc6c 100644 --- a/pyomo/version/tests/check.py +++ b/pyomo/version/tests/check.py @@ -15,7 +15,8 @@ import pyomo import pyomo.environ import pyomo.core + print("OK") except Exception: e = sys.exc_info()[1] - print("Pyomo package error: "+str(e)) + print("Pyomo package error: " + str(e)) diff --git a/pyomo/version/tests/test_version.py b/pyomo/version/tests/test_version.py index 47cf2e3f90b..253ee53137c 100644 --- a/pyomo/version/tests/test_version.py +++ b/pyomo/version/tests/test_version.py @@ -14,14 +14,14 @@ class Tests(unittest.TestCase): - def test_releaselevel(self): _relLevel = pyomo_ver.version_info[3].split('{')[0].strip() - self.assertIn( _relLevel,('devel','VOTD','final') ) + self.assertIn(_relLevel, ('devel', 'VOTD', 'final')) def test_version(self): try: import pkg_resources + version = pkg_resources.get_distribution('pyomo').version except: self.skipTest('pkg_resources is not available') @@ -35,6 +35,7 @@ def test_version(self): self.assertEqual(str(tmp_[1]), str(pyomo_ver.version_info[1])) if tmp_[-1].startswith('dev'): import pyomo.version.info as info + self.assertEqual(int(tmp_[-1][3:]), info.serial) tmp_.pop() if len(tmp_) > 2: diff --git a/scripts/get_pyomo.py b/scripts/get_pyomo.py index 969346afb0b..a97c0ba3a00 100644 --- a/scripts/get_pyomo.py +++ b/scripts/get_pyomo.py @@ -13,10 +13,12 @@ # import sys + try: import pip + pip_version = pip.__version__.split('.') - for i,s in enumerate(pip_version): + for i, s in enumerate(pip_version): try: pip_version[i] = int(s) except: @@ -29,7 +31,7 @@ print("Installing Pyomo ...") -cmd 
= ['install',] +cmd = ['install'] # Disable the PIP download cache if pip_version[0] >= 6: cmd.append('--no-cache-dir') @@ -37,7 +39,7 @@ cmd.append('--download-cache') cmd.append('') # Allow the user to provide extra options -cmd.extend( sys.argv[1:] ) +cmd.extend(sys.argv[1:]) # install Pyomo cmd.append('Pyomo') diff --git a/scripts/get_pyomo_extras.py b/scripts/get_pyomo_extras.py index 7a2f2bd2299..d2aa097154a 100644 --- a/scripts/get_pyomo_extras.py +++ b/scripts/get_pyomo_extras.py @@ -27,7 +27,10 @@ callerFrame = inspect.stack()[0] _dir = os.path.join( dirname(dirname(abspath(inspect.getfile(callerFrame[0])))), - 'pyomo','scripting','plugins') + 'pyomo', + 'scripting', + 'plugins', + ) sys.path.insert(0, _dir) extras = __import__('extras') extras.install_extras() diff --git a/scripts/performance/compare.py b/scripts/performance/compare.py index d2e8b1949fb..5edef9bfadd 100755 --- a/scripts/performance/compare.py +++ b/scripts/performance/compare.py @@ -19,7 +19,8 @@ from math import sqrt, log10, floor from statistics import stdev, mean -#import scipy.stats as st + +# import scipy.stats as st # scipy.stats.norm.ppf(0.9) # 0.95 = 1.6448536269514722 # 0.90 = 1.2815515655446004 @@ -29,11 +30,12 @@ # Z-score: (mean(x) - mean(y)) / sqrt( # stdev(x)**2 / card(x) + stdev(y)**2 / card(y) ) + class Result(object): z_threshold = 1.6448536269514722 # 95% - #z_threshold = 1.2815515655446004 # 90% - #z_threshold = 0.8416212335729143 # 80% - #z_threshold = 0.6744897501960817 # 75% + # z_threshold = 1.2815515655446004 # 90% + # z_threshold = 0.8416212335729143 # 80% + # z_threshold = 0.6744897501960817 # 75% def __init__(self, test, base=None, relative=False, precision=None): self.test = test @@ -83,7 +85,7 @@ def test_base_value(self): def __float__(self): val, dev = self.value() if isinstance(val, str): - return 0. 
+ return 0.0 return float(val) def __lt__(self, other): @@ -107,12 +109,16 @@ def tostr(self, width=0): precision = self.precision if width: - precision = max(0, min( - precision, - width - (2 if val >= 0 else 3) - ( - 1 if not val else floor(log10(abs(val)))) - )) - val_str = ('%%%d.%df' % (width, precision,)) % val + precision = max( + 0, + min( + precision, + width + - (2 if val >= 0 else 3) + - (1 if not val else floor(log10(abs(val)))), + ), + ) + val_str = ('%%%d.%df' % (width, precision)) % val if z > Result.z_threshold: if val < 0: return '\033[92m' + val_str + '\033[0m' @@ -139,8 +145,9 @@ def combine(*results): if "::" not in test: # Convert nosetests results in to pytest format path, test = test.split(':') - test = '/'.join(path.split('.')) + '.py::' \ - + '::'.join(test.split('.')) + test = ( + '/'.join(path.split('.')) + '.py::' + '::'.join(test.split('.')) + ) if test not in ans: ans[test] = {} testdata = ans[test] @@ -166,6 +173,7 @@ def combine(*results): break return ans + def compare(base_data, test_data): """Compare two data sets (generated by compare())""" # Nosetests and pytest would give different test names (based on @@ -191,29 +199,35 @@ def compare(base_data, test_data): if testname not in test_data: continue fields.update(set(base).intersection(test_data[testname])) - fields = sorted(fields - {'test_time',}) + fields = sorted(fields - {'test_time'}) lines = [] for testname, base in base_data.items(): if testname not in test_data: continue test = test_data[testname] - lines.append([ - [ Result(testname) ], - [ Result(test['test_time']) ], - [ Result(test['test_time'], base['test_time']), - Result(test['test_time'], base['test_time'], relative=True)], - [ Result(test.get(field, None), base.get(field,None)) - for field in fields ], - [ Result(test.get(field, None)) for field in fields ] - ]) + lines.append( + [ + [Result(testname)], + [Result(test['test_time'])], + [ + Result(test['test_time'], base['test_time']), + Result(test['test_time'], 
base['test_time'], relative=True), + ], + [ + Result(test.get(field, None), base.get(field, None)) + for field in fields + ], + [Result(test.get(field, None)) for field in fields], + ] + ) lines.sort() return ( - [['test_name'], ['test_time'], ['time(\u0394)', 'time(%)'], - fields, fields], + [['test_name'], ['test_time'], ['time(\u0394)', 'time(%)'], fields, fields], lines, ) + def print_comparison(os, data): """Print the 'comparison' table from the data to os @@ -227,6 +241,7 @@ def print_comparison(os, data): """ _printer([2, 1, 3, 0], os, data) + def print_test_result(os, data): """Print the 'test result' table from the data to os @@ -240,28 +255,37 @@ def print_test_result(os, data): """ _printer([1, 4, 0], os, data) + def _printer(arglist, os, data): fields = sum((data[0][i] for i in arglist), []) - lines = [ sum((line[i] for i in arglist), []) for line in data[1] ] + lines = [sum((line[i] for i in arglist), []) for line in data[1]] field_w = [max(len(field), 7) for field in fields] - os.write(' '.join(('%%%ds' % w) % fields[i] - for i, w in enumerate(field_w)) + '\n') - os.write('-'*(len(field_w) + sum(field_w) - 1) + '\n') - cumul = [Result(0, 0, relative=v.relative) for i,v in enumerate(lines[0])] + os.write(' '.join(('%%%ds' % w) % fields[i] for i, w in enumerate(field_w)) + '\n') + os.write('-' * (len(field_w) + sum(field_w) - 1) + '\n') + cumul = [Result(0, 0, relative=v.relative) for i, v in enumerate(lines[0])] for line in sorted(lines): - os.write(' '.join(('%%%ds' % width) % line[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') - for i,v in enumerate(line): + os.write( + ' '.join( + ('%%%ds' % width) % line[i].tostr(width) + for i, width in enumerate(field_w) + ) + + '\n' + ) + for i, v in enumerate(line): _test, _base = v.test_base_value() if isinstance(_test, str): continue cumul[i].test += _test cumul[i].base += _base - os.write('-'*(len(field_w) + sum(field_w) - 1) + '\n') + os.write('-' * (len(field_w) + sum(field_w) - 1) + '\n') 
cumul[-1].test = "[ TOTAL ]" - os.write(' '.join(('%%%ds' % width) % cumul[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') + os.write( + ' '.join( + ('%%%ds' % width) % cumul[i].tostr(width) for i, width in enumerate(field_w) + ) + + '\n' + ) if not any(c.base for c in cumul): return for c in cumul[:-1]: @@ -269,8 +293,13 @@ def _printer(arglist, os, data): c.base = 0 c.relative = True cumul[-1].test = "[ %diff ]" - os.write(' '.join(('%%%ds' % width) % cumul[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') + os.write( + ' '.join( + ('%%%ds' % width) % cumul[i].tostr(width) for i, width in enumerate(field_w) + ) + + '\n' + ) + if __name__ == '__main__': clean = '--clean' in sys.argv @@ -298,9 +327,8 @@ def _printer(arglist, os, data): for line in data[1]: name = line[0][0].test if 'nonpublic' in name: - line[0][0].test = ( - name[:name.find('.', name.find('nonpublic'))] - + (".%s" % n) + line[0][0].test = name[: name.find('.', name.find('nonpublic'))] + ( + ".%s" % n ) n += 1 print_test_result(sys.stdout, data) diff --git a/scripts/performance/compare_components.py b/scripts/performance/compare_components.py index ddd07f3dc42..01c2d42395c 100644 --- a/scripts/performance/compare_components.py +++ b/scripts/performance/compare_components.py @@ -16,17 +16,19 @@ import time import pickle -from pyomo.kernel import (block, - block_list, - variable, - variable_list, - variable_dict, - constraint, - linear_constraint, - constraint_dict, - constraint_list, - matrix_constraint, - objective) +from pyomo.kernel import ( + block, + block_list, + variable, + variable_list, + variable_dict, + constraint, + linear_constraint, + constraint_dict, + constraint_list, + matrix_constraint, + objective, +) from pyomo.core.kernel.variable import IVariable from pyomo.core.base import Integers, RangeSet, Objective @@ -47,16 +49,18 @@ pympler_kwds = {} + def _fmt(num, suffix='B'): """format memory output""" if num is None: return "" - for unit in 
['','K','M','G','T','P','E','Z']: + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f %s%s" % (num, 'Yi', suffix) + def measure(f, n=25): """measure average execution time over n trials""" gc.collect() @@ -78,6 +82,7 @@ def measure(f, n=25): gc.collect() return mem_bytes, time_seconds + def summarize(results): """neatly summarize output for comparison of several tests""" line = "%9s %50s %12s %7s %12s %7s" @@ -89,16 +94,12 @@ def summarize(results): time_factor = "" if i > 0: if initial_mem_b is not None: - mem_factor = "(%4.2fx)" % (float(mem_b)/initial_mem_b) + mem_factor = "(%4.2fx)" % (float(mem_b) / initial_mem_b) else: mem_factory = "" - time_factor = "(%4.2fx)" % (float(time_s)/initial_time_s) - print(line % (libname, - label, - _fmt(mem_b), - mem_factor, - time_s, - time_factor)) + time_factor = "(%4.2fx)" % (float(time_s) / initial_time_s) + print(line % (libname, label, _fmt(mem_b), mem_factor, time_s, time_factor)) + def build_Var(): """Build a Var and delete any references to external @@ -108,6 +109,7 @@ def build_Var(): obj._domain = None return obj + def build_GeneralVarData(): """Build a _GeneralVarData and delete any references to external objects so its size can be computed.""" @@ -115,122 +117,151 @@ def build_GeneralVarData(): obj._domain = None return obj + def build_variable(): """Build a variable with no references to external objects so its size can be computed.""" - return variable(domain_type=None, - lb=None, - ub=None) + return variable(domain_type=None, lb=None, ub=None) + class _staticvariable(IVariable): """An _example_ of a more lightweight variable.""" + _ctype = IVariable domain_type = None lb = None ub = None fixed = False stale = False - __slots__ = ("value","_parent","_storage_key","_active") + __slots__ = ("value", "_parent", "_storage_key", "_active") + def __init__(self): self.value = None self._parent = None self._storage_key = None 
self._active = True + def build_staticvariable(): """Build a static variable with no references to external objects so its size can be computed.""" return _staticvariable() + def build_Constraint(): """Build a Constraint and delete any references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_Constraint.xlist, - build_Constraint.clist)) + expr = sum(x * c for x, c in zip(build_Constraint.xlist, build_Constraint.clist)) obj = Constraint(expr=(0, expr, 1)) obj._parent = build_Constraint.dummy_parent obj.construct() obj._parent = None return obj + + build_Constraint.xlist = [build_GeneralVarData() for i in range(5)] build_Constraint.clist = [1.1 for i in range(5)] build_Constraint.dummy_parent = lambda: None + def build_GeneralConstraintData(): """Build a _GeneralConstraintData and delete any references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_Constraint.xlist, - build_Constraint.clist)) + expr = sum(x * c for x, c in zip(build_Constraint.xlist, build_Constraint.clist)) return _GeneralConstraintData(expr=(0, expr, 1)) + + build_Constraint.xlist = [build_GeneralVarData() for i in range(5)] build_Constraint.clist = [1.1 for i in range(5)] + def build_constraint(): """Build a constraint with no references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_constraint.xlist, - build_constraint.clist)) + expr = sum(x * c for x, c in zip(build_constraint.xlist, build_constraint.clist)) return constraint(lb=0, body=expr, ub=1) + + build_constraint.xlist = [build_variable() for i in range(5)] build_constraint.clist = [1.1 for i in range(5)] + def build_linear_constraint(): """Build a linear_constraint with no references to external objects so its size can be computed.""" - return linear_constraint(variables=build_linear_constraint.xlist, - coefficients=build_linear_constraint.clist, - lb=0, ub=1) + return linear_constraint( + 
variables=build_linear_constraint.xlist, + coefficients=build_linear_constraint.clist, + lb=0, + ub=1, + ) + + build_linear_constraint.xlist = [build_variable() for i in range(5)] build_linear_constraint.clist = [1.1 for i in range(5)] + def _bounds_rule(m, i): return (None, None) + + def _initialize_rule(m, i): return None + + def _reset(): build_indexed_Var.model = Block(concrete=True) - build_indexed_Var.model.ndx = RangeSet(0, N-1) + build_indexed_Var.model.ndx = RangeSet(0, N - 1) build_indexed_Var.bounds_rule = _bounds_rule build_indexed_Var.initialize_rule = _initialize_rule + + def build_indexed_Var(): """Build an indexed Var with no references to external objects so its size can be computed.""" model = build_indexed_Var.model - model.indexed_Var = Var(model.ndx, - domain=Integers, - bounds=build_indexed_Var.bounds_rule, - initialize=build_indexed_Var.initialize_rule) + model.indexed_Var = Var( + model.ndx, + domain=Integers, + bounds=build_indexed_Var.bounds_rule, + initialize=build_indexed_Var.initialize_rule, + ) model.indexed_Var._domain = None model.indexed_Var._component = None return model.indexed_Var + + build_indexed_Var.reset_for_test = _reset build_indexed_Var.reset_for_test() + def build_variable_dict(): """Build a variable_dict with no references to external objects so its size can be computed.""" return variable_dict( - ((i, variable(domain_type=None, lb=None, ub=None, value=None)) - for i in range(N))) + ( + (i, variable(domain_type=None, lb=None, ub=None, value=None)) + for i in range(N) + ) + ) + def build_variable_list(): """Build a variable_list with no references to external objects so its size can be computed.""" return variable_list( - variable(domain_type=None, lb=None, ub=None, value=None) - for i in range(N)) + variable(domain_type=None, lb=None, ub=None, value=None) for i in range(N) + ) + def build_staticvariable_list(): """Build a variable_list of static variables with no references to external objects so its size can be 
computed.""" - return variable_list(_staticvariable() - for i in range(N)) + return variable_list(_staticvariable() for i in range(N)) -A = scipy.sparse.random(N, N, - density=0.2, - format='csr', - dtype=float) + +A = scipy.sparse.random(N, N, density=0.2, format='csr', dtype=float) b = numpy.ones(N) # as lists A_data = A.data.tolist() @@ -240,75 +271,118 @@ def build_staticvariable_list(): X_aml = [build_GeneralVarData() for i in range(N)] X_kernel = [build_variable() for i in range(N)] + def _con_rule(m, i): # expr == rhs - return (sum(A_data[p]*X_aml[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1])), 1) + return ( + sum( + A_data[p] * X_aml[A_indices[p]] for p in range(A_indptr[i], A_indptr[i + 1]) + ), + 1, + ) + + def _reset(): build_indexed_Constraint.model = Block(concrete=True) - build_indexed_Constraint.model.ndx = RangeSet(0, N-1) + build_indexed_Constraint.model.ndx = RangeSet(0, N - 1) build_indexed_Constraint.rule = _con_rule + + def build_indexed_Constraint(): """Build an indexed Constraint with no references to external objects so its size can be computed.""" model = build_indexed_Constraint.model - model.indexed_Constraint = Constraint(model.ndx, - rule=build_indexed_Constraint.rule) + model.indexed_Constraint = Constraint(model.ndx, rule=build_indexed_Constraint.rule) model.indexed_Constraint._component = None return model.indexed_Constraint + + build_indexed_Constraint.reset_for_test = _reset build_indexed_Constraint.reset_for_test() + def build_constraint_dict(): """Build a constraint_dict with no references to external objects so its size can be computed.""" return constraint_dict( - ((i, constraint(rhs=1, body=sum(A_data[p]*X_kernel[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1])))) - for i in range(N))) + ( + ( + i, + constraint( + rhs=1, + body=sum( + A_data[p] * X_kernel[A_indices[p]] + for p in range(A_indptr[i], A_indptr[i + 1]) + ), + ), + ) + for i in range(N) + ) + ) + def build_constraint_list(): """Build a 
constraint_list with no references to external objects so its size can be computed.""" return constraint_list( - constraint(rhs=1, body=sum(A_data[p]*X_kernel[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1]))) - for i in range(N)) + constraint( + rhs=1, + body=sum( + A_data[p] * X_kernel[A_indices[p]] + for p in range(A_indptr[i], A_indptr[i + 1]) + ), + ) + for i in range(N) + ) + def build_linear_constraint_list(): """Build a constraint_list of linear_constraints with no references to external objects so its size can be computed.""" return constraint_list( - linear_constraint(variables=(X_kernel[A_indices[p]] for p in range(A_indptr[i], A_indptr[i+1])), - coefficients=(A_data[p] for p in range(A_indptr[i], A_indptr[i+1])), - rhs=1) - for i in range(N)) + linear_constraint( + variables=( + X_kernel[A_indices[p]] for p in range(A_indptr[i], A_indptr[i + 1]) + ), + coefficients=(A_data[p] for p in range(A_indptr[i], A_indptr[i + 1])), + rhs=1, + ) + for i in range(N) + ) + def build_matrix_constraint(): """Build a constraint_list with no references to external objects so its size can be computed.""" return matrix_constraint(A, rhs=b, x=X_kernel) + def build_Block(): """Build a Block with a few components.""" obj = Block(concrete=True) obj.construct() return obj + def build_BlockData(): """Build a _BlockData with a few components.""" obj = _BlockData(build_BlockData.owner) obj._component = None return obj + + build_BlockData.owner = Block() + def build_block(): b = block() b._activate_large_storage_mode() return b + build_small_block = block + def build_Block_with_objects(): """Build an empty Block""" obj = Block(concrete=True) @@ -319,6 +393,7 @@ def build_Block_with_objects(): obj.o = Objective() return obj + def build_BlockData_with_objects(): """Build an empty _BlockData""" obj = _BlockData(build_BlockData_with_objects.owner) @@ -328,8 +403,11 @@ def build_BlockData_with_objects(): obj.o = Objective() obj._component = None return obj + + 
build_BlockData_with_objects.owner = Block() + def build_block_with_objects(): """Build an empty block.""" b = block() @@ -339,6 +417,7 @@ def build_block_with_objects(): b.o = objective() return b + def build_small_block_with_objects(): """Build an empty block.""" b = block() @@ -347,6 +426,7 @@ def build_small_block_with_objects(): b.o = objective() return b + def _indexed_Block_rule(b, i): b.x1 = Var() b.x1._domain = None @@ -359,18 +439,26 @@ def _indexed_Block_rule(b, i): b.x5 = Var() b.x5._domain = None return b + + def _reset(): build_indexed_BlockWVars.model = Block(concrete=True) - build_indexed_BlockWVars.model.ndx = RangeSet(0, N-1) + build_indexed_BlockWVars.model.ndx = RangeSet(0, N - 1) build_indexed_BlockWVars.indexed_Block_rule = _indexed_Block_rule + + def build_indexed_BlockWVars(): model = build_indexed_BlockWVars.model - model.indexed_Block = Block(model.ndx, - rule=build_indexed_BlockWVars.indexed_Block_rule) + model.indexed_Block = Block( + model.ndx, rule=build_indexed_BlockWVars.indexed_Block_rule + ) return model.indexed_Block + + build_indexed_BlockWVars.reset_for_test = _reset build_indexed_BlockWVars.reset_for_test() + def build_block_list_with_variables(): blist = block_list() for i in range(N): @@ -384,6 +472,7 @@ def build_block_list_with_variables(): blist.append(b) return blist + def _get_small_block(): b = block() b.x1 = variable(domain_type=None, lb=None, ub=None) @@ -393,12 +482,14 @@ def _get_small_block(): b.x5 = variable(domain_type=None, lb=None, ub=None) return b + def build_small_block_list_with_variables(): - return block_list( - build_small_block_list_with_variables.myblock() - for i in range(N)) + return block_list(build_small_block_list_with_variables.myblock() for i in range(N)) + + build_small_block_list_with_variables.myblock = _get_small_block + def _get_small_block_wstaticvars(): myvar = _staticvariable b = block() @@ -409,10 +500,13 @@ def _get_small_block_wstaticvars(): b.x5 = myvar() return b + def 
build_small_block_list_with_staticvariables(): return block_list( - build_small_block_list_with_staticvariables.myblock() - for i in range(N)) + build_small_block_list_with_staticvariables.myblock() for i in range(N) + ) + + build_small_block_list_with_staticvariables.myblock = _get_small_block_wstaticvars @@ -438,7 +532,9 @@ def build_small_block_list_with_staticvariables(): results.append(("AML", "Indexed Var (%s)" % N, measure(build_indexed_Var))) results.append(("Kernel", "variable_dict (%s)" % N, measure(build_variable_dict))) results.append(("Kernel", "variable_list (%s)" % N, measure(build_variable_list))) - results.append(("", "staticvariable_list (%s)" % N, measure(build_staticvariable_list))) + results.append( + ("", "staticvariable_list (%s)" % N, measure(build_staticvariable_list)) + ) summarize(results) print("") @@ -447,9 +543,19 @@ def build_small_block_list_with_staticvariables(): # implementations # results = [] - results.append(("AML", "Constraint()", measure(build_Constraint))) - results.append(("AML", "_GeneralConstraintData()", measure(build_GeneralConstraintData))) - results.append(("Kernel", "constraint()", measure(build_constraint))) + results.append( + ("AML", "Constraint()", measure(build_Constraint)) + ) + results.append( + ( + "AML", + "_GeneralConstraintData()", + measure(build_GeneralConstraintData), + ) + ) + results.append( + ("Kernel", "constraint()", measure(build_constraint)) + ) results.append(("Kernel", "linear_constraint", measure(build_linear_constraint))) summarize(results) print("") @@ -459,11 +565,25 @@ def build_small_block_list_with_staticvariables(): # container implementations # results = [] - results.append(("AML", "Indexed Constraint (%s)" % N, measure(build_indexed_Constraint))) - results.append(("Kernel", "constraint_dict (%s)" % N, measure(build_constraint_dict))) - results.append(("Kernel", "constraint_list (%s)" % N, measure(build_constraint_list))) - results.append(("Kernel", "linear_constraint_list (%s)" % N, 
measure(build_linear_constraint_list))) - results.append(("Kernel", "matrix_constraint (%s)" % N, measure(build_matrix_constraint))) + results.append( + ("AML", "Indexed Constraint (%s)" % N, measure(build_indexed_Constraint)) + ) + results.append( + ("Kernel", "constraint_dict (%s)" % N, measure(build_constraint_dict)) + ) + results.append( + ("Kernel", "constraint_list (%s)" % N, measure(build_constraint_list)) + ) + results.append( + ( + "Kernel", + "linear_constraint_list (%s)" % N, + measure(build_linear_constraint_list), + ) + ) + results.append( + ("Kernel", "matrix_constraint (%s)" % N, measure(build_matrix_constraint)) + ) summarize(results) print("") @@ -481,9 +601,19 @@ def build_small_block_list_with_staticvariables(): results = [] results.append(("AML", "Block w/ 3 components", measure(build_Block_with_objects))) - results.append(("AML", "_BlockData w/ 3 components", measure(build_BlockData_with_objects))) - results.append(("Kernel", "block w/ 3 components", measure(build_block_with_objects))) - results.append(("Kernel", "small_block w/ 3 components", measure(build_small_block_with_objects))) + results.append( + ("AML", "_BlockData w/ 3 components", measure(build_BlockData_with_objects)) + ) + results.append( + ("Kernel", "block w/ 3 components", measure(build_block_with_objects)) + ) + results.append( + ( + "Kernel", + "small_block w/ 3 components", + measure(build_small_block_with_objects), + ) + ) summarize(results) print("") @@ -492,12 +622,28 @@ def build_small_block_list_with_staticvariables(): # container implementations # results = [] - results.append(("AML", "Indexed Block (%s) w/ Vars (5)" % N, - measure(build_indexed_BlockWVars))) - results.append(("Kernel", "block_list (%s) w/ variables (5)" % N, - measure(build_block_list_with_variables))) - results.append(("Kernel", "small_block_list (%s) w/ variables (5)" % N, - measure(build_small_block_list_with_variables))) - results.append(("", "small_block_list (%s) w/ staticvariables (5)" % N, - 
measure(build_small_block_list_with_staticvariables))) + results.append( + ("AML", "Indexed Block (%s) w/ Vars (5)" % N, measure(build_indexed_BlockWVars)) + ) + results.append( + ( + "Kernel", + "block_list (%s) w/ variables (5)" % N, + measure(build_block_list_with_variables), + ) + ) + results.append( + ( + "Kernel", + "small_block_list (%s) w/ variables (5)" % N, + measure(build_small_block_list_with_variables), + ) + ) + results.append( + ( + "", + "small_block_list (%s) w/ staticvariables (5)" % N, + measure(build_small_block_list_with_staticvariables), + ) + ) summarize(results) diff --git a/scripts/performance/expr_perf.py b/scripts/performance/expr_perf.py index 3cd19994be5..6055c1a5952 100644 --- a/scripts/performance/expr_perf.py +++ b/scripts/performance/expr_perf.py @@ -5,17 +5,19 @@ from pyomo.environ import * import pyomo.version from pyomo.core.base.expr_common import _clear_expression_pool -from pyomo.core.base import expr as EXPR +from pyomo.core.base import expr as EXPR import pprint as pp import gc import time + try: import pympler - pympler_available=True + + pympler_available = True pympler_kwds = {} except: - pympler_available=False + pympler_available = False import sys import argparse @@ -35,36 +37,59 @@ # Dummy Sum() function used for Coopr3 tests # if coopr3_or_pyomo4: + def Sum(*args): return sum(*args) + class TimeoutError(Exception): pass + class timeout: def __init__(self, seconds=10, error_message='Timeout'): self.seconds = seconds self.error_message = error_message + def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) + def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) + def __exit__(self, type, value, traceback): signal.alarm(0) - _timeout = 20 -#NTerms = 100 -#N = 1 +# NTerms = 100 +# N = 1 NTerms = 100000 -N = 30 +N = 30 parser = argparse.ArgumentParser() -parser.add_argument("-o", "--output", help="Save results to the specified file", action="store", 
default=None) -parser.add_argument("--nterms", help="The number of terms in test expressions", action="store", type=int, default=None) -parser.add_argument("--ntrials", help="The number of test trials", action="store", type=int, default=None) +parser.add_argument( + "-o", + "--output", + help="Save results to the specified file", + action="store", + default=None, +) +parser.add_argument( + "--nterms", + help="The number of terms in test expressions", + action="store", + type=int, + default=None, +) +parser.add_argument( + "--ntrials", + help="The number of test trials", + action="store", + type=int, + default=None, +) args = parser.parse_args() if args.nterms: @@ -74,7 +99,6 @@ def __exit__(self, type, value, traceback): print("NTerms %d NTrials %d\n\n" % (NTerms, N)) - # # Execute a function 'n' times, collecting performance statistics and # averaging them @@ -92,11 +116,12 @@ def measure(f, n=25): for key in data[0]: d_ = [] for i in range(n): - d_.append( data[i][key] ) - ans[key] = {"mean": sum(d_)/float(n), "data": d_} + d_.append(data[i][key]) + ans[key] = {"mean": sum(d_) / float(n), "data": d_} # return ans + # # Evaluate standard operations on an expression # @@ -117,7 +142,7 @@ def evaluate(expr, seconds): start = time.time() expr = EXPR.compress_expression(expr, verbose=False) stop = time.time() - seconds['compress'] = stop-start + seconds['compress'] = stop - start seconds['compressed_size'] = expr.size() except TimeoutError: print("TIMEOUT") @@ -134,7 +159,7 @@ def evaluate(expr, seconds): start = time.time() expr_ = expr.clone() stop = time.time() - seconds['clone'] = stop-start + seconds['clone'] = stop - start except RecursionError: seconds['clone'] = -888.0 except TimeoutError: @@ -148,7 +173,7 @@ def evaluate(expr, seconds): start = time.time() d_ = expr.polynomial_degree() stop = time.time() - seconds['polynomial_degree'] = stop-start + seconds['polynomial_degree'] = stop - start except RecursionError: seconds['polynomial_degree'] = -888.0 except 
TimeoutError: @@ -162,7 +187,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -176,7 +201,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -188,11 +213,12 @@ def evaluate(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() r_ = generate_standard_repn(expr, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -208,7 +234,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -222,7 +248,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -234,11 +260,12 @@ def evaluate(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_ampl_repn + with timeout(seconds=_timeout): start = time.time() r_ = generate_ampl_repn(expr) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -249,6 +276,7 @@ def evaluate(expr, seconds): return seconds + # # Evaluate standard operations on an expression # @@ -266,7 +294,7 @@ def 
evaluate_all(expr, seconds): for e in expr: e.clone() stop = time.time() - seconds['clone'] = stop-start + seconds['clone'] = stop - start except RecursionError: seconds['clone'] = -888.0 except TimeoutError: @@ -281,7 +309,7 @@ def evaluate_all(expr, seconds): for e in expr: e.polynomial_degree() stop = time.time() - seconds['polynomial_degree'] = stop-start + seconds['polynomial_degree'] = stop - start except RecursionError: seconds['polynomial_degree'] = -888.0 except TimeoutError: @@ -296,7 +324,7 @@ def evaluate_all(expr, seconds): for e in expr: e.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -311,7 +339,7 @@ def evaluate_all(expr, seconds): for e in expr: e.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -323,20 +351,22 @@ def evaluate_all(expr, seconds): _clear_expression_pool() if True: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_standard_repn(e, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start try: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_standard_repn(e, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -350,12 +380,13 @@ def evaluate_all(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_ampl_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_ampl_repn(e) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start 
except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -366,11 +397,11 @@ def evaluate_all(expr, seconds): return seconds + # # Create a linear expression # def linear(N, flag): - def f(): seconds = {} @@ -397,52 +428,52 @@ def f(): stop = time.time() elif flag == 2: start = time.time() - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 3: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] stop = time.time() elif flag == 4: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] stop = time.time() elif flag == 5: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr stop = time.time() elif flag == 6: start = time.time() - expr=Sum(model.p[i]*model.x[i] for i in model.A) + expr = Sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 7: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) stop = time.time() elif flag == 8: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] stop = time.time() elif flag == 9: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) stop = time.time() elif flag == 12: start = time.time() with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) stop = time.time() elif flag == 13: start = time.time() @@ -469,7 +500,7 @@ def f(): expr += model.p[i] * (1 + model.x[i]) stop = time.time() # - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -481,11 
+512,11 @@ def f(): return f + # # Create a linear expression # def simple_linear(N, flag): - def f(): seconds = {} @@ -508,52 +539,52 @@ def f(): stop = time.time() elif flag == 2: start = time.time() - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 3: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] stop = time.time() elif flag == 4: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] stop = time.time() elif flag == 5: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr stop = time.time() elif flag == 6: start = time.time() - expr=Sum(model.p[i]*model.x[i] for i in model.A) + expr = Sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 7: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) stop = time.time() elif flag == 8: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] stop = time.time() elif flag == 9: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) stop = time.time() elif flag == 12: start = time.time() with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) stop = time.time() elif flag == 13: start = time.time() @@ -580,7 +611,7 @@ def f(): expr += model.p[i] * (1 + model.x[i]) stop = time.time() # - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -592,11 +623,11 @@ def f(): return f + # # Create a nested linear expression # def nested_linear(N, flag): - def f(): 
seconds = {} @@ -618,29 +649,31 @@ def f(): start = time.time() # if flag == 1: - expr = 2* sum_product(model.p, model.x) + expr = 2 * sum_product(model.p, model.x) elif flag == 2: - expr= 2 * sum(model.p[i]*model.x[i] for i in model.A) + expr = 2 * sum(model.p[i] * model.x[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] expr *= 2 elif flag == 4: - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] expr *= 2 elif flag == 5: - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr expr *= 2 elif flag == 6: - expr= 2 * Sum(model.p[i]*model.x[i] for i in model.A) + expr = 2 * Sum(model.p[i] * model.x[i] for i in model.A) elif flag == 12: with EXPR.linear_expression as expr: - expr= 2 * sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = 2 * sum( + (model.p[i] * model.x[i] for i in model.A), expr + ) elif flag == 13: with EXPR.linear_expression as expr: for i in model.A: @@ -658,7 +691,7 @@ def f(): expr *= 2 # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -675,7 +708,6 @@ def f(): # Create a constant expression from mutable parameters # def constant(N, flag): - def f(): seconds = {} @@ -696,23 +728,23 @@ def f(): if flag == 1: expr = sum_product(model.p, model.q, index=model.A) elif flag == 2: - expr=sum(model.p[i]*model.q[i] for i in model.A) + expr = sum(model.p[i] * model.q[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.q[i] elif flag == 4: - expr=Sum(model.p[i]*model.q[i] for i in model.A) + expr = Sum(model.p[i] * model.q[i] for i in model.A) elif flag == 12: with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.q[i] for i in model.A), expr) + expr = sum((model.p[i] * model.q[i] for i in model.A), expr) elif flag == 13: with 
EXPR.linear_expression as expr: for i in model.A: expr += model.p[i] * model.q[i] # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -729,7 +761,6 @@ def f(): # Create a bilinear expression # def bilinear(N, flag): - def f(): seconds = {} @@ -756,23 +787,30 @@ def f(): if flag == 1: expr = sum_product(model.p, model.x, model.y) elif flag == 2: - expr=sum(model.p[i]*model.x[i]*model.y[i] for i in model.A) + expr = sum( + model.p[i] * model.x[i] * model.y[i] for i in model.A + ) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] * model.y[i] elif flag == 4: - expr=Sum(model.p[i]*model.x[i]*model.y[i] for i in model.A) + expr = Sum( + model.p[i] * model.x[i] * model.y[i] for i in model.A + ) elif flag == 12: with EXPR.quadratic_expression as expr: - expr=sum((model.p[i]*model.x[i]*model.y[i] for i in model.A), expr) + expr = sum( + (model.p[i] * model.x[i] * model.y[i] for i in model.A), + expr, + ) elif flag == 13: with EXPR.quadratic_expression as expr: for i in model.A: expr += model.p[i] * model.x[i] * model.y[i] # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -789,7 +827,6 @@ def f(): # Create a simple nonlinear expression # def nonlinear(N, flag): - def f(): seconds = {} @@ -812,23 +849,25 @@ def f(): start = time.time() # if flag == 2: - expr=sum(model.p[i]*tan(model.x[i]) for i in model.A) + expr = sum(model.p[i] * tan(model.x[i]) for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * tan(model.x[i]) elif flag == 4: - expr=Sum(model.p[i]*tan(model.x[i]) for i in model.A) + expr = Sum(model.p[i] * tan(model.x[i]) for i in model.A) if flag == 12: with EXPR.nonlinear_expression as expr: - 
expr=sum((model.p[i]*tan(model.x[i]) for i in model.A), expr) + expr = sum( + (model.p[i] * tan(model.x[i]) for i in model.A), expr + ) elif flag == 13: with EXPR.nonlinear_expression as expr: for i in model.A: expr += model.p[i] * tan(model.x[i]) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -845,7 +884,6 @@ def f(): # Create an expression that is a complex polynomial # def polynomial(N, flag): - def f(): seconds = {} @@ -864,12 +902,12 @@ def f(): start = time.time() # if True: - expr=0 + expr = 0 for i in model.A: expr = model.x[i] * (1 + expr) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -886,7 +924,6 @@ def f(): # Create an expression that is a large product # def product(N, flag): - def f(): seconds = {} @@ -905,18 +942,18 @@ def f(): start = time.time() # if flag == 1: - expr=model.x+model.x + expr = model.x + model.x for i in model.A: - expr = model.p[i]*expr + expr = model.p[i] * expr elif flag == 2: - expr=model.x+model.x + expr = model.x + model.x for i in model.A: expr *= model.p[i] elif flag == 3: - expr=(model.x+model.x) * prod(model.p[i] for i in model.A) + expr = (model.x + model.x) * prod(model.p[i] for i in model.A) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -933,7 +970,6 @@ def f(): # Create many small linear expressions # def many_linear(N, flag): - def f(): seconds = {} @@ -954,10 +990,10 @@ def f(): expr = [] if flag == 2: for i in model.A: - expr.append( model.x[1] + model.x[i] ) + expr.append(model.x[1] + model.x[i]) # stop = time.time() - seconds['construction'] = stop-start + 
seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate_all(expr, seconds) try: @@ -971,6 +1007,7 @@ def f(): return f + # # Utility function used by runall() # @@ -982,7 +1019,7 @@ def print_results(factors_, ans_, output): # -# Run the experiments and populate the dictionary 'res' +# Run the experiments and populate the dictionary 'res' # with the mapping: factors -> performance results # # Performance results are a mapping: name -> seconds @@ -990,269 +1027,265 @@ def print_results(factors_, ans_, output): def runall(factors, res, output=True): if True: - factors_ = tuple(factors+['ManyLinear','Loop 2']) + factors_ = tuple(factors + ['ManyLinear', 'Loop 2']) ans_ = res[factors_] = measure(many_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['Constant','Loop 1']) + factors_ = tuple(factors + ['Constant', 'Loop 1']) ans_ = res[factors_] = measure(constant(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 2']) + factors_ = tuple(factors + ['Constant', 'Loop 2']) ans_ = res[factors_] = measure(constant(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 12']) + factors_ = tuple(factors + ['Constant', 'Loop 12']) ans_ = res[factors_] = measure(constant(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 3']) + factors_ = tuple(factors + ['Constant', 'Loop 3']) ans_ = res[factors_] = measure(constant(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 13']) + factors_ = tuple(factors + ['Constant', 'Loop 13']) ans_ = res[factors_] = measure(constant(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 4']) + factors_ = tuple(factors + ['Constant', 'Loop 4']) ans_ = res[factors_] = measure(constant(NTerms, 4), n=N) print_results(factors_, 
ans_, output) if True: - factors_ = tuple(factors+['Linear','Loop 1']) + factors_ = tuple(factors + ['Linear', 'Loop 1']) ans_ = res[factors_] = measure(linear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 2']) + factors_ = tuple(factors + ['Linear', 'Loop 2']) ans_ = res[factors_] = measure(linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 12']) + factors_ = tuple(factors + ['Linear', 'Loop 12']) ans_ = res[factors_] = measure(linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 3']) + factors_ = tuple(factors + ['Linear', 'Loop 3']) ans_ = res[factors_] = measure(linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 13']) + factors_ = tuple(factors + ['Linear', 'Loop 13']) ans_ = res[factors_] = measure(linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 4']) + factors_ = tuple(factors + ['Linear', 'Loop 4']) ans_ = res[factors_] = measure(linear(NTerms, 4), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 14']) + factors_ = tuple(factors + ['Linear', 'Loop 14']) ans_ = res[factors_] = measure(linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 5']) + factors_ = tuple(factors + ['Linear', 'Loop 5']) ans_ = res[factors_] = measure(linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 15']) + factors_ = tuple(factors + ['Linear', 'Loop 15']) ans_ = res[factors_] = measure(linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 6']) + factors_ = tuple(factors + ['Linear', 'Loop 6']) ans_ = res[factors_] = measure(linear(NTerms, 6), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 
7']) + factors_ = tuple(factors + ['Linear', 'Loop 7']) ans_ = res[factors_] = measure(linear(NTerms, 7), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 17']) + factors_ = tuple(factors + ['Linear', 'Loop 17']) ans_ = res[factors_] = measure(linear(NTerms, 17), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 8']) + factors_ = tuple(factors + ['Linear', 'Loop 8']) ans_ = res[factors_] = measure(linear(NTerms, 8), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 9']) + factors_ = tuple(factors + ['Linear', 'Loop 9']) ans_ = res[factors_] = measure(linear(NTerms, 9), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['SimpleLinear','Loop 1']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 1']) ans_ = res[factors_] = measure(simple_linear(NTerms, 1), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['SimpleLinear','Loop 2']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 2']) ans_ = res[factors_] = measure(simple_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 12']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 12']) ans_ = res[factors_] = measure(simple_linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 3']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 3']) ans_ = res[factors_] = measure(simple_linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 13']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 13']) ans_ = res[factors_] = measure(simple_linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 4']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 4']) ans_ = res[factors_] = measure(simple_linear(NTerms, 4), n=N) 
print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 14']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 14']) ans_ = res[factors_] = measure(simple_linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 5']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 5']) ans_ = res[factors_] = measure(simple_linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 15']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 15']) ans_ = res[factors_] = measure(simple_linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 7']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 7']) ans_ = res[factors_] = measure(simple_linear(NTerms, 7), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 17']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 17']) ans_ = res[factors_] = measure(simple_linear(NTerms, 17), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 8']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 8']) ans_ = res[factors_] = measure(simple_linear(NTerms, 8), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 9']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 9']) ans_ = res[factors_] = measure(simple_linear(NTerms, 9), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['NestedLinear','Loop 1']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 1']) ans_ = res[factors_] = measure(nested_linear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 2']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 2']) ans_ = res[factors_] = measure(nested_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = 
tuple(factors+['NestedLinear','Loop 12']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 12']) ans_ = res[factors_] = measure(nested_linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 3']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 3']) ans_ = res[factors_] = measure(nested_linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 13']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 13']) ans_ = res[factors_] = measure(nested_linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 4']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 4']) ans_ = res[factors_] = measure(nested_linear(NTerms, 4), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 14']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 14']) ans_ = res[factors_] = measure(nested_linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 5']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 5']) ans_ = res[factors_] = measure(nested_linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 15']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 15']) ans_ = res[factors_] = measure(nested_linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 6']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 6']) ans_ = res[factors_] = measure(nested_linear(NTerms, 6), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Bilinear','Loop 1']) + factors_ = tuple(factors + ['Bilinear', 'Loop 1']) ans_ = res[factors_] = measure(bilinear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 2']) + factors_ = tuple(factors + ['Bilinear', 'Loop 2']) 
ans_ = res[factors_] = measure(bilinear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 12']) + factors_ = tuple(factors + ['Bilinear', 'Loop 12']) ans_ = res[factors_] = measure(bilinear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 3']) + factors_ = tuple(factors + ['Bilinear', 'Loop 3']) ans_ = res[factors_] = measure(bilinear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 13']) + factors_ = tuple(factors + ['Bilinear', 'Loop 13']) ans_ = res[factors_] = measure(bilinear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 4']) + factors_ = tuple(factors + ['Bilinear', 'Loop 4']) ans_ = res[factors_] = measure(bilinear(NTerms, 4), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Nonlinear','Loop 2']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 2']) ans_ = res[factors_] = measure(nonlinear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 12']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 12']) ans_ = res[factors_] = measure(nonlinear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 3']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 3']) ans_ = res[factors_] = measure(nonlinear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 13']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 13']) ans_ = res[factors_] = measure(nonlinear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 4']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 4']) ans_ = res[factors_] = measure(nonlinear(NTerms, 4), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Polynomial','Loop 3']) + factors_ = 
tuple(factors + ['Polynomial', 'Loop 3']) ans_ = res[factors_] = measure(polynomial(NTerms, 3), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Product','Loop 1']) + factors_ = tuple(factors + ['Product', 'Loop 1']) ans_ = res[factors_] = measure(polynomial(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Product','Loop 2']) + factors_ = tuple(factors + ['Product', 'Loop 2']) ans_ = res[factors_] = measure(polynomial(NTerms, 2), n=N) print_results(factors_, ans_, output) def remap_keys(mapping): - return [{'factors':k, 'performance': v} for k, v in mapping.items()] + return [{'factors': k, 'performance': v} for k, v in mapping.items()] + # # MAIN # res = {} -#runall(["COOPR3"], res) +# runall(["COOPR3"], res) -#EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) -#runall(["PYOMO4"], res) +# EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) +# runall(["PYOMO4"], res) -#EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo5_trees) -#import cProfile -#cProfile.run('runall(["PYOMO5"], res)', 'restats4') +# EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo5_trees) +# import cProfile +# cProfile.run('runall(["PYOMO5"], res)', 'restats4') runall(["PYOMO5"], res) if args.output: @@ -1261,21 +1294,34 @@ def remap_keys(mapping): # Write csv file # perf_types = sorted(next(iter(res.values())).keys()) - res_ = [ list(key) + [res.get(key,{}).get(k,{}).get('mean',-777) for k in perf_types] for key in sorted(res.keys())] + res_ = [ + list(key) + + [res.get(key, {}).get(k, {}).get('mean', -777) for k in perf_types] + for key in sorted(res.keys()) + ] with open(args.output, 'w') as OUTPUT: import csv + writer = csv.writer(OUTPUT) writer.writerow(['Version', 'ExprType', 'ExprNum'] + perf_types) for line in res_: writer.writerow(line) elif args.output.endswith(".json"): - res_ = {'script': sys.argv[0], 'NTerms':NTerms, 'NTrials':N, 'data': remap_keys(res), 
'pyomo_version':pyomo.version.version, 'pyomo_versioninfo':pyomo.version.version_info[:3]} + res_ = { + 'script': sys.argv[0], + 'NTerms': NTerms, + 'NTrials': N, + 'data': remap_keys(res), + 'pyomo_version': pyomo.version.version, + 'pyomo_versioninfo': pyomo.version.version_info[:3], + } # # Write json file # with open(args.output, 'w') as OUTPUT: import json + json.dump(res_, OUTPUT) else: diff --git a/scripts/performance/main.py b/scripts/performance/main.py index 2814843e438..10349c0eb73 100755 --- a/scripts/performance/main.py +++ b/scripts/performance/main.py @@ -18,6 +18,7 @@ import platform import sys import time + try: import ujson as json except ImportError: @@ -39,6 +40,7 @@ class TimingHandler(logging.Handler): information and adds it to the test data recorder. """ + def __init__(self): super(TimingHandler, self).__init__() self._testRecord = None @@ -81,6 +83,7 @@ class DataRecorder(object): report. """ + def __init__(self, data): self._data = data self._timer = TicTocTimer() @@ -130,12 +133,7 @@ def getProjectInfo(project): version = _module.__version__ finally: os.chdir(cwd) - return { - 'branch': branch, - 'sha': sha, - 'diffs': diffs, - 'version': version, - } + return {'branch': branch, 'sha': sha, 'diffs': diffs, 'version': version} def getRunInfo(options): @@ -151,6 +149,7 @@ def getRunInfo(options): info['pypy_version'] = tuple(sys.pypy_version_info) if options.cython: import Cython + info['cython'] = tuple(int(x) for x in Cython.__version__.split('.')) for project in options.projects: info[project] = getProjectInfo(project) @@ -160,7 +159,7 @@ def getRunInfo(options): def run_tests(options, argv): gc.collect() gc.collect() - results = ( getRunInfo(options), OrderedDict() ) + results = (getRunInfo(options), OrderedDict()) recorder = DataRecorder(results[1]) unittest.pytest.main(argv, plugins=[recorder]) gc.collect() @@ -169,46 +168,48 @@ def run_tests(options, argv): def main(argv): - parser = argparse.ArgumentParser( - epilog="Remaining 
arguments are passed to pytest" - ) + parser = argparse.ArgumentParser(epilog="Remaining arguments are passed to pytest") parser.add_argument( - '-o', '--output', + '-o', + '--output', action='store', dest='output', default=None, - help='Store the test results to the specified file.' + help='Store the test results to the specified file.', ) parser.add_argument( - '-d', '--dir', + '-d', + '--dir', action='store', dest='output_dir', default=None, help='Store the test results in the specified directory. If -o ' 'is not specified, then a file name is automatically generated ' - 'based on the first "main project" git branch and hash.' + 'based on the first "main project" git branch and hash.', ) parser.add_argument( - '-p', '--project', + '-p', + '--project', action='append', dest='projects', default=[], help='Main project (used for generating and recording SHA and ' - 'DIFF information)' + 'DIFF information)', ) parser.add_argument( - '-n', '--replicates', + '-n', + '--replicates', action='store', dest='replicates', type=int, default=1, - help='Number of replicates to run.' + help='Number of replicates to run.', ) parser.add_argument( '--with-cython', action='store_true', dest='cython', - help='Cythonization enabled.' 
+ help='Cythonization enabled.', ) options, argv = parser.parse_known_args(argv) @@ -229,11 +230,11 @@ def main(argv): if not options.output: options.output = 'perf-%s-%s-%s-%s.json' % ( results[0][options.projects[0]]['branch'], - results[0][options.projects[0]]['sha'][:7] + ( - '_mod' if results[0][options.projects[0]]['diffs'] else ''), - results[0]['python_implementation'].lower() + ( - '.'.join(str(i) for i in results[0]['python_version'][:3])), - time.strftime('%y%m%d_%H%M', time.localtime()) + results[0][options.projects[0]]['sha'][:7] + + ('_mod' if results[0][options.projects[0]]['diffs'] else ''), + results[0]['python_implementation'].lower() + + ('.'.join(str(i) for i in results[0]['python_version'][:3])), + time.strftime('%y%m%d_%H%M', time.localtime()), ) options.output = os.path.join(options.output_dir, options.output) if options.output: @@ -253,5 +254,6 @@ def main(argv): print("Performance run complete.") return results + if __name__ == '__main__': results = main(sys.argv) diff --git a/scripts/performance/simple.py b/scripts/performance/simple.py index 9ac5a8b0a37..2990f13f413 100644 --- a/scripts/performance/simple.py +++ b/scripts/performance/simple.py @@ -11,18 +11,23 @@ else: from pyomo.repn import generate_standard_repn + class TimeoutError(Exception): pass + class timeout: def __init__(self, seconds=10, error_message='Timeout'): self.seconds = seconds self.error_message = error_message + def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) + def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) + def __exit__(self, type, value, traceback): signal.alarm(0) @@ -38,51 +43,51 @@ def __exit__(self, type, value, traceback): def linear(flag): if flag == 0: - expr=sum(model.x[i] for i in model.A) + expr = sum(model.x[i] for i in model.A) elif flag == 10: with EXPR.linear_expression as expr: - expr=sum((model.x[i] for i in model.A), expr) + expr = sum((model.x[i] for i in model.A), 
expr) elif flag == 20: - expr=quicksum(model.x[i] for i in model.A) + expr = quicksum(model.x[i] for i in model.A) elif flag == 1: expr = sum_product(model.p, model.x) elif flag == 6: - expr=quicksum((model.p[i]*model.x[i] for i in model.A), linear=False) + expr = quicksum((model.p[i] * model.x[i] for i in model.A), linear=False) elif flag == 16: - expr=quicksum((model.p[i]*model.x[i] for i in model.A), linear=True) + expr = quicksum((model.p[i] * model.x[i] for i in model.A), linear=True) elif flag == 26: - expr=quicksum(model.p[i]*model.x[i] for i in model.A) + expr = quicksum(model.p[i] * model.x[i] for i in model.A) elif flag == 2: - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] elif flag == 4: try: with timeout(10): - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] except: - expr = model.x[1] # BOGUS + expr = model.x[1] # BOGUS elif flag == 5: try: with timeout(10): - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr except: - expr = model.x[1] # BOGUS + expr = model.x[1] # BOGUS elif flag == 12: with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) elif flag == 22: with EXPR.nonlinear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) elif flag == 13: with EXPR.linear_expression as expr: for i in model.A: @@ -97,7 +102,7 @@ def linear(flag): expr = model.p[i] * model.x[i] + expr elif flag == 7: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) elif flag == 17: @@ -105,60 +110,110 @@ def linear(flag): for i in model.A: expr += model.p[i] * (1 + model.x[i]) elif flag == 27: - expr = quicksum(model.p[i]*(1 + model.x[i]) for i in model.A) + expr = 
quicksum(model.p[i] * (1 + model.x[i]) for i in model.A) elif flag == 8: - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] elif flag == 18: # This will assume a nonlinear sum expr = quicksum((model.x[i] + model.x[i]) for i in model.A) elif flag == 9: - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) elif flag == 19: # This will assume a nonlinear sum - expr = quicksum(model.p[i]*(model.x[i] + model.x[i]) for i in model.A) + expr = quicksum(model.p[i] * (model.x[i] + model.x[i]) for i in model.A) elif flag == -9: expr = quicksum(sin(model.x[i]) for i in model.A) elif flag == 30: - expr=0 + expr = 0 for i in model.A: - expr += model.x[i]*model.y[i] + expr += model.x[i] * model.y[i] elif flag == -30: - expr= quicksum(model.x[i]*model.y[i] for i in model.A) + expr = quicksum(model.x[i] * model.y[i] for i in model.A) if coopr3 or pyomo4: repn = generate_ampl_repn(expr) else: repn = generate_standard_repn(EXPR.compress_expression(expr), quadratic=False) + if coopr3: import pyomo.core.kernel.expr_coopr3 as COOPR3 - print("REFCOUNT: "+str(COOPR3._getrefcount_available)) - for i in (0,2,3,6,7,8,9): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + + print("REFCOUNT: " + str(COOPR3._getrefcount_available)) + for i in (0, 2, 3, 6, 7, 8, 9): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import linear", number=1 + ), + ) + ) if pyomo4: import pyomo.core.kernel.expr_pyomo4 as PYOMO4 + EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) - print("REFCOUNT: "+str(PYOMO4._getrefcount_available)) - for i in (0,2,3,6,7,8,9): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + print("REFCOUNT: " + str(PYOMO4._getrefcount_available)) + for i in (0, 2, 3, 6, 7, 8, 9): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import 
linear", number=1 + ), + ) + ) if not (coopr3 or pyomo4): import pyomo.core.expr.expr_pyomo5 as PYOMO5 - #print("REFCOUNT: "+str(PYOMO5._getrefcount_available)) - #import cProfile - #cProfile.run("linear(7)", "stats.7") - for i in (0,10,20,2,12,22,3,13,4,14,5,15,6,16,26,7,17,27,8,18,9,19,-9,30,-30): - #for i in (6,16,26): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + # print("REFCOUNT: "+str(PYOMO5._getrefcount_available)) + # import cProfile + # cProfile.run("linear(7)", "stats.7") + for i in ( + 0, + 10, + 20, + 2, + 12, + 22, + 3, + 13, + 4, + 14, + 5, + 15, + 6, + 16, + 26, + 7, + 17, + 27, + 8, + 18, + 9, + 19, + -9, + 30, + -30, + ): + # for i in (6,16,26): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import linear", number=1 + ), + ) + ) diff --git a/setup.py b/setup.py index 90d840fd34a..baf32a61332 100644 --- a/setup.py +++ b/setup.py @@ -17,11 +17,13 @@ import platform import sys from setuptools import setup, find_packages, Command + try: from setuptools import DistutilsOptionError except ImportError: from distutils.errors import DistutilsOptionError + def read(*rnames): with open(os.path.join(os.path.dirname(__file__), *rnames)) as README: # Strip all leading badges up to, but not including the COIN-OR @@ -34,6 +36,7 @@ def read(*rnames): break return line + README.read() + def import_pyomo_module(*path): _module_globals = dict(globals()) _module_globals['__name__'] = None @@ -42,13 +45,16 @@ def import_pyomo_module(*path): exec(_FILE.read(), _module_globals) return _module_globals + def get_version(): # Source pyomo/version/info.py to get the version number - return import_pyomo_module('pyomo','version','info.py')['__version__'] + return import_pyomo_module('pyomo', 'version', 'info.py')['__version__'] + CYTHON_REQUIRED = "required" -if not any(arg.startswith(cmd) - for cmd in ('build','install','bdist') for arg in sys.argv): +if not any( + arg.startswith(cmd) for cmd in ('build', 
'install', 'bdist') for arg in sys.argv +): using_cython = False else: using_cython = "automatic" @@ -66,16 +72,18 @@ def get_version(): # break out of this try-except (disable Cython) raise RuntimeError("Cython is only supported under CPython") from Cython.Build import cythonize + # # Note: The Cython developers recommend that you distribute C source # files to users. But this is fine for evaluating the utility of Cython # import shutil + files = [ "pyomo/core/expr/numvalue.pyx", "pyomo/core/expr/numeric_expr.pyx", "pyomo/core/expr/logical_expr.pyx", - #"pyomo/core/expr/visitor.pyx", + # "pyomo/core/expr/visitor.pyx", "pyomo/core/util.pyx", "pyomo/repn/standard_repn.pyx", "pyomo/repn/plugins/cpxlp.pyx", @@ -85,20 +93,22 @@ def get_version(): ] for f in files: shutil.copyfile(f[:-1], f) - ext_modules = cythonize(files, - compiler_directives={"language_level": 3}) + ext_modules = cythonize(files, compiler_directives={"language_level": 3}) except: if using_cython == CYTHON_REQUIRED: - print(""" + print( + """ ERROR: Cython was explicitly requested with --with-cython, but cythonization of core Pyomo modules failed. -""") +""" + ) raise using_cython = False -if (('--with-distributable-extensions' in sys.argv) - or (os.getenv('PYOMO_SETUP_ARGS') is not None and - '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS'))): +if ('--with-distributable-extensions' in sys.argv) or ( + os.getenv('PYOMO_SETUP_ARGS') is not None + and '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS') +): try: sys.argv.remove('--with-distributable-extensions') except: @@ -108,10 +118,14 @@ def get_version(): # NOTE: There is inconsistent behavior in Windows for APPSI. # As a result, we will NOT include these extensions in Windows. 
if not sys.platform.startswith('win'): - appsi_extension = import_pyomo_module( - 'pyomo', 'contrib', 'appsi', 'build.py')['get_appsi_extension']( - in_setup=True, appsi_root=os.path.join( - os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi')) + appsi_extension = import_pyomo_module('pyomo', 'contrib', 'appsi', 'build.py')[ + 'get_appsi_extension' + ]( + in_setup=True, + appsi_root=os.path.join( + os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi' + ), + ) ext_modules.append(appsi_extension) @@ -127,24 +141,22 @@ class DependenciesCommand(Command): `extras_require`). """ + description = "list the dependencies for this package" - user_options = [ - ('extras=', None, 'extra targets to include'), - ] + user_options = [('extras=', None, 'extra targets to include')] def initialize_options(self): self.extras = None def finalize_options(self): if self.extras is not None: - self.extras = [ - e for e in (_.strip() for _ in self.extras.split(',')) if e - ] + self.extras = [e for e in (_.strip() for _ in self.extras.split(',')) if e] for e in self.extras: if e not in setup_kwargs['extras_require']: raise DistutilsOptionError( "extras can only include {%s}" - % (', '.join(setup_kwargs['extras_require']))) + % (', '.join(setup_kwargs['extras_require'])) + ) def run(self): deps = list(self._print_deps(setup_kwargs['install_requires'])) @@ -165,26 +177,26 @@ def _print_deps(self, deplist): setup_kwargs = dict( - name = 'Pyomo', + name='Pyomo', # # Note: the release number is set in pyomo/version/info.py # - cmdclass = {'dependencies': DependenciesCommand}, - version = get_version(), - maintainer = 'Pyomo Developer Team', - maintainer_email = 'pyomo-developers@googlegroups.com', - url = 'http://pyomo.org', - project_urls = { + cmdclass={'dependencies': DependenciesCommand}, + version=get_version(), + maintainer='Pyomo Developer Team', + maintainer_email='pyomo-developers@googlegroups.com', + url='http://pyomo.org', + project_urls={ 'Documentation': 
'https://pyomo.readthedocs.io/', 'Source': 'https://github.com/Pyomo/pyomo', }, - license = 'BSD', - platforms = ["any"], - description = 'Pyomo: Python Optimization Modeling Objects', - long_description = read('README.md'), - long_description_content_type = 'text/markdown', - keywords = ['optimization'], - classifiers = [ + license='BSD', + platforms=["any"], + description='Pyomo: Python Optimization Modeling Objects', + long_description=read('README.md'), + long_description_content_type='text/markdown', + keywords=['optimization'], + classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Science/Research', @@ -203,12 +215,11 @@ def _print_deps(self, deplist): 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Software Development :: Libraries :: Python Modules' ], - python_requires = '>=3.7', - install_requires = [ - 'ply', + 'Topic :: Software Development :: Libraries :: Python Modules', ], - extras_require = { + python_requires='>=3.7', + install_requires=['ply'], + extras_require={ 'tests': [ #'codecov', # useful for testing infrastructures, but not required 'coverage', @@ -223,12 +234,12 @@ def _print_deps(self, deplist): 'sphinx_rtd_theme>0.5', 'sphinxcontrib-jsmath', 'sphinxcontrib-napoleon', - 'numpy', # Needed by autodoc for pynumero - 'scipy', # Needed by autodoc for pynumero + 'numpy', # Needed by autodoc for pynumero + 'scipy', # Needed by autodoc for pynumero ], 'optional': [ - 'dill', # No direct use, but improves lambda pickle - 'ipython', # contrib.viewer + 'dill', # No direct use, but improves lambda pickle + 'ipython', # contrib.viewer # Note: matplotlib 3.6.1 has bug #24127, which breaks # seaborn's histplot (triggering parmest failures) 'matplotlib!=3.6.1', @@ -236,13 +247,13 @@ def _print_deps(self, deplist): 'numpy', 'openpyxl', # dataportals 
#'pathos', # requested for #963, but PR currently closed - 'pint', # units - 'python-louvain', # community_detection - 'pyyaml', # core + 'pint', # units + 'python-louvain', # community_detection + 'pyyaml', # core 'scipy', - 'sympy', # differentiation - 'xlrd', # dataportals - 'z3-solver', # community_detection + 'sympy', # differentiation + 'xlrd', # dataportals + 'z3-solver', # community_detection # # subprocess output is merged more reliably if # 'PeekNamedPipe' is available from pywin32 @@ -255,28 +266,28 @@ def _print_deps(self, deplist): # DAE can use casadi; as of 1 Nov 22, casadi has not been # released for Python 3.11 'casadi; implementation_name!="pypy" and python_version<"3.11"', - 'numdifftools; implementation_name!="pypy"', # pynumero + 'numdifftools; implementation_name!="pypy"', # pynumero 'pandas; implementation_name!="pypy"', - 'seaborn; implementation_name!="pypy"', # parmest.graphics + 'seaborn; implementation_name!="pypy"', # parmest.graphics ], }, - packages = find_packages(exclude=("scripts",)), - package_data = { + packages=find_packages(exclude=("scripts",)), + package_data={ "pyomo.contrib.ampl_function_demo": ["src/*"], "pyomo.contrib.appsi.cmodel": ["src/*"], "pyomo.contrib.mcpp": ["*.cpp"], "pyomo.contrib.pynumero": ['src/*', 'src/tests/*'], "pyomo.contrib.viewer": ["*.ui"], }, - ext_modules = ext_modules, - entry_points = """ + ext_modules=ext_modules, + entry_points=""" [console_scripts] pyomo = pyomo.scripting.pyomo_main:main_console_script [pyomo.command] pyomo.help = pyomo.scripting.driver_help pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer - """ + """, ) @@ -290,23 +301,31 @@ def _print_deps(self, deplist): if 'Microsoft Visual C++' not in str(e_info): raise elif using_cython == CYTHON_REQUIRED: - print(""" + print( + """ ERROR: Cython was explicitly requested with --with-cython, but cythonization of core Pyomo modules failed. 
-""") +""" + ) raise else: - print(""" + print( + """ ERROR: setup() failed: %s Re-running setup() without the Cython modules -""" % (str(e_info),)) +""" + % (str(e_info),) + ) setup_kwargs['ext_modules'] = [] setup(**setup_kwargs) - print(""" + print( + """ WARNING: Installation completed successfully, but the attempt to cythonize core Pyomo modules failed. Cython provides performance optimizations and is not required for any Pyomo functionality. Cython returned the following error: "%s" -""" % (str(e_info),)) +""" + % (str(e_info),) + )