diff --git a/.github/workflows/build-bazel.yml b/.github/workflows/build-bazel.yml index 3d92177760..3bf734f973 100644 --- a/.github/workflows/build-bazel.yml +++ b/.github/workflows/build-bazel.yml @@ -12,20 +12,16 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: bazelbuild/setup-bazelisk@v2 - - - name: Mount bazel cache # Optional - uses: actions/cache@v3 - with: - path: "~/.cache/bazel" - key: bazel + - uses: bazelbuild/setup-bazelisk@v3 - name: bazel clean run: bazel clean - name: build bazel - run: | - bazel build //... + run: bazel build --enable_bzlmod //... - - name: test + - name: test all + run: bazel test --enable_bzlmod //... + + - name: test example run: ./bazel-bin/call-highs-example \ No newline at end of file diff --git a/.github/workflows/build-nuget-package.yml b/.github/workflows/build-nuget-package.yml index 8e87ea2b2f..226273b932 100644 --- a/.github/workflows/build-nuget-package.yml +++ b/.github/workflows/build-nuget-package.yml @@ -185,7 +185,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/build-python-package.yml b/.github/workflows/build-python-package.yml index d356bb1d1f..0b6c32f2fe 100644 --- a/.github/workflows/build-python-package.yml +++ b/.github/workflows/build-python-package.yml @@ -11,6 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - uses: seanmiddleditch/gha-setup-ninja@master - name: Build sdist run: | @@ -194,7 +195,7 @@ jobs: python -m pip install pytest python -m pytest - build_wheel_windows: + build_wheel_windows_313: runs-on: windows-2019 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/build-python-sdist.yml b/.github/workflows/build-python-sdist.yml new file mode 100644 index 0000000000..914f46fe1c --- /dev/null +++ b/.github/workflows/build-python-sdist.yml @@ -0,0 +1,88 @@ +name: build-python-sdist + +on: [push, pull_request] + +jobs: + build_sdist_ubuntu: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + + steps: + - uses: actions/checkout@v4 + - uses: seanmiddleditch/gha-setup-ninja@master + + - name: Build sdist + shell: bash + run: pipx run build --sdist + + - name: check metadata + run: pipx run twine check dist/* + + - name: install highspy + run: | + python3 -m pip install dist/*.tar.gz --user + + - name: Test Python Examples + run: | + python3 ./examples/call_highs_from_python_highspy.py + python3 ./examples/call_highs_from_python_mps.py + python3 ./examples/call_highs_from_python.py + python3 ./examples/minimal.py + + build_sdist_mac: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [macos-latest] + + steps: + - uses: actions/checkout@v4 + - uses: seanmiddleditch/gha-setup-ninja@master + + - name: Build sdist + shell: bash + run: pipx run build --sdist + + - name: check metadata + run: pipx run twine check dist/* + + - name: install highspy + run: | + python3 -m venv path/to/venv + source path/to/venv/bin/activate + python3 -m pip install dist/*.tar.gz + + - name: Test Python Examples + run: | + source path/to/venv/bin/activate + python3 ./examples/call_highs_from_python_highspy.py + python3 ./examples/call_highs_from_python_mps.py + python3 ./examples/call_highs_from_python.py + python3 ./examples/minimal.py + + build_sdist_win: + runs-on: windows-latest + + steps: + - uses: actions/checkout@v4 + + - name: Build sdist + shell: bash + run: 
pipx run build --sdist + + - name: check metadata + run: pipx run twine check dist/* + + - name: install highspy + run: | + $item = Get-ChildItem dist + python -m pip install "$item" + + - name: Test Python Examples + run: | + python ./examples/call_highs_from_python_highspy.py + python ./examples/call_highs_from_python_mps.py + python ./examples/call_highs_from_python.py + python ./examples/minimal.py \ No newline at end of file diff --git a/.github/workflows/build-wheels-push.yml b/.github/workflows/build-wheels-push.yml index 6bd76ef7cd..899d865c9a 100644 --- a/.github/workflows/build-wheels-push.yml +++ b/.github/workflows/build-wheels-push.yml @@ -1,11 +1,12 @@ name: build-wheels-push +on: [] # on: push -on: - release: - types: - - published +# on: +# release: +# types: +# - published concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -22,13 +23,13 @@ jobs: shell: bash run: pipx run build --sdist - # - name: check metadata - # run: pipx run twine check python/dist/* + - name: check metadata + run: pipx run twine check dist/* - uses: actions/upload-artifact@v4 with: name: cibw-sdist - path: python/dist/*.tar.gz + path: dist/*.tar.gz build_wheels: name: Build wheel for ${{ matrix.python }}-${{ matrix.buildplat[1] }} @@ -81,49 +82,22 @@ jobs: name: cibw-wheels-${{ matrix.python }}-${{ matrix.buildplat[1] }} path: wheelhouse/*.whl - # upload_testpypi: - # name: >- - # Publish highspy to TestPyPI - # runs-on: ubuntu-latest - # needs: [build_wheels, build_sdist] - - # # upload to PyPI on every tag starting with 'v' - # # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') - - # environment: - # name: testpypi - # url: https://testpypi.org/p/highspy - - # permissions: - # id-token: write # IMPORTANT: mandatory for trusted publishing - # steps: - # - uses: actions/download-artifact@v4 - # with: - # pattern: cibw-* - # path: dist - # merge-multiple: true - - # - name: Download all - # uses: pypa/gh-action-pypi-publish@release/v1 - # with: - # repository-url: https://test.pypi.org/legacy/ - - upload_pypi: + upload_testpypi: name: >- - Publish highspy to PyPI + Publish highspy to TestPyPI runs-on: ubuntu-latest needs: [build_wheels, build_sdist] + # needs: [build_sdist] # upload to PyPI on every tag starting with 'v' # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') environment: - name: pypi - url: https://pypi.org/p/highspy + name: testpypi + url: https://test.pypi.org/p/highspy permissions: id-token: write # IMPORTANT: mandatory for trusted publishing - steps: - uses: actions/download-artifact@v4 with: @@ -133,3 +107,32 @@ jobs: - name: Download all uses: pypa/gh-action-pypi-publish@release/v1 + with: + repository-url: https://test.pypi.org/legacy/ + verbose: true + + # upload_pypi: + # name: >- + # Publish highspy to PyPI + # runs-on: ubuntu-latest + # needs: [build_wheels, build_sdist] + + # # upload to PyPI on every tag starting with 'v' + # # if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') + + # environment: + # name: pypi + # url: https://pypi.org/p/highspy + + # permissions: + # id-token: write # IMPORTANT: mandatory for trusted publishing + + # steps: + # - uses: actions/download-artifact@v4 + # with: + # pattern: cibw-* + # path: dist + # merge-multiple: true + + # - name: Download all + # uses: pypa/gh-action-pypi-publish@release/v1 diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index ce012369bf..25819ad7e0 100644 --- a/.github/workflows/build-wheels.yml +++ 
b/.github/workflows/build-wheels.yml @@ -18,6 +18,9 @@ jobs: shell: bash run: pipx run build --sdist + - name: check metadata + run: pipx run twine check dist/* + build_wheels: name: Build wheel for ${{ matrix.python }}-${{ matrix.buildplat[1] }} runs-on: ${{ matrix.buildplat[0] }} diff --git a/.github/workflows/test-nuget-macos.yml b/.github/workflows/test-nuget-macos.yml index 20b102d015..ba72778fb5 100644 --- a/.github/workflows/test-nuget-macos.yml +++ b/.github/workflows/test-nuget-macos.yml @@ -35,7 +35,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -80,7 +80,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets diff --git a/.github/workflows/test-nuget-package.yml b/.github/workflows/test-nuget-package.yml index 69559912ce..5363584d9d 100644 --- a/.github/workflows/test-nuget-package.yml +++ b/.github/workflows/test-nuget-package.yml @@ -35,7 +35,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -81,7 +81,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -124,7 +124,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -167,7 +167,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -212,7 +212,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source -n name ${{runner.workspace}}\nugets @@ -228,5 +228,5 @@ jobs: dotnet new console rm Program.cs cp ${{runner.workspace}}\HiGHS\examples\call_highs_from_csharp.cs . 
- dotnet add package Highs.Native -v 1.8.1 -s ${{runner.workspace}}\nugets + dotnet add package Highs.Native -v 1.9.0 -s ${{runner.workspace}}\nugets dotnet run diff --git a/.github/workflows/test-nuget-ubuntu.yml b/.github/workflows/test-nuget-ubuntu.yml index 67b60c11c9..206dad6095 100644 --- a/.github/workflows/test-nuget-ubuntu.yml +++ b/.github/workflows/test-nuget-ubuntu.yml @@ -31,7 +31,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets @@ -74,7 +74,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source ${{runner.workspace}}/nugets diff --git a/.github/workflows/test-nuget-win.yml b/.github/workflows/test-nuget-win.yml index 8d6e74ef38..ad7d6a0476 100644 --- a/.github/workflows/test-nuget-win.yml +++ b/.github/workflows/test-nuget-win.yml @@ -33,7 +33,7 @@ jobs: - name: Dotnet pack working-directory: ${{runner.workspace}}/build/dotnet/Highs.Native - run: dotnet pack -c Release /p:Version=1.8.1 + run: dotnet pack -c Release /p:Version=1.9.0 - name: Add local feed run: dotnet nuget add source -n name ${{runner.workspace}}\nugets @@ -49,5 +49,5 @@ jobs: dotnet new console rm Program.cs cp ${{runner.workspace}}\HiGHS\examples\call_highs_from_csharp.cs . - dotnet add package Highs.Native -v 1.8.1 -s ${{runner.workspace}}\nugets + dotnet add package Highs.Native -v 1.9.0 -s ${{runner.workspace}}\nugets dotnet run diff --git a/BUILD.bazel b/BUILD.bazel index c8b6070d03..1927d5e919 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -18,7 +18,6 @@ cc_library( name = "highs", srcs = glob([ "extern/filereaderlp/*.cpp", - "extern/zlib/*.cpp", "src/interfaces/highs_c_api.cpp", "src/io/*.cpp", "src/ipm/*.cpp", @@ -37,7 +36,6 @@ cc_library( "src/util/*.cpp", ]), hdrs = glob([ - "HConfig.h", "**/*.h", "src/qpsolver/*.hpp", "src/Highs.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index c8deeee559..d27135425b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -319,16 +319,17 @@ if(NOT FAST_BUILD) set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${HIGHS_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}) endif() -if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86\_64|i686)") - if (WIN32) - message("FLAG_MPOPCNT_SUPPORTED is not available on this architecture") - else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mpopcnt") - endif() -elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64|powerpc64)" AND NOT APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mpopcntd") +include(CheckCXXCompilerFlag) +if(CMAKE_SYSTEM_PROCESSOR MATCHES "^(ppc64|powerpc64)" AND NOT APPLE) + check_cxx_compiler_flag("-mpopcntd" COMPILER_SUPPORTS_POPCNTD) + if(COMPILER_SUPPORTS_POPCNTD) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mpopcntd") + endif() else() - message("FLAG_MPOPCNT_SUPPORTED is not available on this architecture") + check_cxx_compiler_flag("-mpopcnt" COMPILER_SUPPORTS_POPCNT) + if(COMPILER_SUPPORTS_POPCNT) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mpopcnt") + endif() endif() option(DEBUGSOL "check the debug solution" OFF) diff --git a/FEATURES.md b/FEATURES.md index 034eca4232..40d6292050 100644 --- a/FEATURES.md +++ b/FEATURES.md @@ -1,40 +1,16 @@ ## Build changes -## Code changes - -When primal infeasiblity is detected in presolve, no dual ray is 
available so, previously, the `has_dual_ray` parameter of `Highs::getDualRay` returned false and that was it. Now, if a null pointer is not passed for `dual_ray_value`, `Highs::getDualRay` will compute a dual ray - at the cost of solving the feasiblility LP without presolve. The same is now true for `Highs::getPrimalRay`. `Highs::getDualUnboundednessDirection` has been introduced to determine the product between the constraint matrix and the dual ray, forcing the calculation of the latter if necessary. Once a dual ray is known for the incumbent model in HiGHS, subsequent calls to `Highs::getDualRay` and `Highs::getDualUnboundednessDirection` will be vastly cheaper - -The method `Highs::getDualObjectiveValue` now exitsts to compute the dual objective value, returning `HighsStatus::kError` if it is not possible. - -The method `Highs::getStandardFormLp` now exists to return the incumbent LP in standard form - overlooking any integrality or Hessian. To determine the sizes of the vectors of data, the method is called without specifying pointers to the data arrays. - -Added documentation on the use of presolve when solving an incumbent model, and clarifying the use of the method `Highs::presolve`. - -HiGHS will now read a `MIPLIB` solution file - -Added time limit check to `HPresolve::strengthenInequalities` - -Added `getColIntegrality` to `highspy` - -Now computing the primal-dual integral, reporting it, and making it available as `HighsInfo::primal_dual_integral` - -Trivial primal heuristics "all zero", "all lower bound", "all upper bound", and "lock point" added to the MIP solver - -# Build changes - -Added wheels for Python 3.13 - -Updated command line options and moved them out of the library and into the executable - - - - - +Python: Source Distribution update, added to PyPI +## Code changes +HiGHS now handles multiple linear objectives by either blending using weights, or performing lexicographic optimization: see https://ergo-code.github.io/HiGHS/stable/guide/further/#guide-multi-objective-optimization +Fixed minor bug in bound checking in presolve +Fixed bug in `floor(HighsCDouble x)` and `ceil(HighsCDouble x)` when argument is small +Added some sanity checks to Highs::writeLocalModel to prevent segfaults if called directly by a user diff --git a/MODULE.bazel b/MODULE.bazel index adb22ca979..4697762158 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -1,17 +1,21 @@ +"""highs module +""" module( name = "highs", - version = "1.8.1", + version = "1.9.0", ) bazel_dep( name = "bazel_skylib", - version = "1.6.1", + version = "1.7.1", ) + bazel_dep( name = "rules_cc", - version = "0.0.9", + version = "0.0.16", ) + bazel_dep( name = "zlib", - version = "1.3.1.bcr.1", + version = "1.3.1.bcr.3", ) diff --git a/README.md b/README.md index 62cfef67b4..9e4d6fbd1e 100644 --- a/README.md +++ b/README.md @@ -208,7 +208,7 @@ The nuget package Highs.Native is on https://www.nuget.org, at https://www.nuget It can be added to your C# project with `dotnet` ```shell -dotnet add package Highs.Native --version 1.8.1 +dotnet add package Highs.Native --version 1.9.0 ``` The nuget package contains runtime libraries for diff --git a/Version.txt b/Version.txt index d8e7ce799d..6282aad654 100644 --- a/Version.txt +++ b/Version.txt @@ -1,4 +1,4 @@ HIGHS_MAJOR=1 -HIGHS_MINOR=8 -HIGHS_PATCH=1 +HIGHS_MINOR=9 +HIGHS_PATCH=0 #PRE_RELEASE=YES diff --git a/check/CMakeLists.txt b/check/CMakeLists.txt index 051b09623a..b1076e1825 100644 --- a/check/CMakeLists.txt +++ b/check/CMakeLists.txt @@ -48,6 +48,7 @@ if ((NOT FAST_BUILD 
OR ALL_TESTS) AND NOT (BUILD_EXTRA_UNIT_ONLY)) TestBasis.cpp TestBasisSolves.cpp TestCrossover.cpp + TestHighsCDouble.cpp TestHighsHash.cpp TestHighsIntegers.cpp TestHighsParallel.cpp @@ -66,6 +67,7 @@ if ((NOT FAST_BUILD OR ALL_TESTS) AND NOT (BUILD_EXTRA_UNIT_ONLY)) TestLpModification.cpp TestLpOrientation.cpp TestModelProperties.cpp + TestMultiObjective.cpp TestPdlp.cpp TestPresolve.cpp TestQpSolver.cpp @@ -75,16 +77,10 @@ if ((NOT FAST_BUILD OR ALL_TESTS) AND NOT (BUILD_EXTRA_UNIT_ONLY)) TestThrow.cpp TestTspSolver.cpp TestUserScale.cpp - Avgas.cpp) - - # todo: IG - if (NOT APPLE) - # Bug with updated IPX code and gas11. Maybe somehow related to the rpath on - # macOS (Lukas). Only triggered by gas11 with no presolve which is strange. - # may be an interface related issue which will pop up soon. - # works OK on linux. The test was added to doctest for macOS but still hanging. - set(TEST_SOURCES ${TEST_SOURCES} TestSpecialLps.cpp TestLpSolvers.cpp TestMipSolver.cpp) - endif() + Avgas.cpp + TestSpecialLps.cpp + TestLpSolvers.cpp + TestMipSolver.cpp) if (BUILD_EXTRA_UNIT_TESTS) list(APPEND CMAKE_MODULE_PATH "${HIGHS_SOURCE_DIR}/check/highs-unit-tests") @@ -257,9 +253,10 @@ if ((NOT FAST_BUILD OR ALL_TESTS) AND NOT (BUILD_EXTRA_UNIT_ONLY)) "standgub\; 1.25769949\;" ) elseif(WIN32) - # on windows e226 model status is unknown, rel gap e00 + # on windows e226 model status is unknown + # on windows 25fv47 model status can be unknown, with objective 5.5018458957e+03 set(pdlpInstances - "25fv47\; 5.50184588\;" + "25fv47\; 5.5018458\;" "adlittle\; 2.25494963\;" "afiro\;-4.64753142\;" "avgas\;-7.749999999\;" @@ -455,4 +452,4 @@ if (BUILD_EXTRA_UNIT_TESTS AND BUILD_EXTRA_UNIT_ONLY) DEPENDS unit-test-extra-build) set_tests_properties(unit_tests_extra PROPERTIES TIMEOUT 10000) -endif() \ No newline at end of file +endif() diff --git a/check/TestCAPI.c b/check/TestCAPI.c index e5848f67a1..d75cc71305 100644 --- a/check/TestCAPI.c +++ b/check/TestCAPI.c @@ -1855,6 +1855,118 @@ void test_getModel() { Highs_destroy(highs); } +void test_multiObjective() { + void* highs; + highs = Highs_create(); + const double inf = Highs_getInfinity(highs); + + HighsInt num_col = 2; + HighsInt num_row = 3; + HighsInt num_nz = num_col * num_row; + HighsInt a_format = kHighsMatrixFormatColwise; + HighsInt sense = kHighsObjSenseMaximize; + double offset = -1; + double col_cost[2] = {1, 1}; + double col_lower[2] = {0, 0}; + double col_upper[2] = {inf, inf}; + double row_lower[3] = {-inf, -inf, -inf}; + double row_upper[3] = {18, 8, 14}; + HighsInt a_start[3] = {0, 3, 6}; + HighsInt a_index[6] = {0, 1, 2, 0, 1, 2}; + double a_value[6] = {3, 1, 1, 1, 1, 2}; + HighsInt integrality[2] = {kHighsVarTypeInteger, kHighsVarTypeInteger}; + + Highs_setBoolOptionValue(highs, "output_flag", dev_run); + HighsInt return_status = Highs_passLp(highs, num_col, num_row, num_nz, a_format, sense, + offset, col_cost, col_lower, col_upper, + row_lower, row_upper, a_start, a_index, a_value); + assert(return_status == kHighsStatusOk); + + return_status = Highs_clearLinearObjectives(highs); + assert(return_status == kHighsStatusOk); + + double weight = -1; + double linear_objective_offset = -1; + double coefficients[2] = {1, 1}; + double abs_tolerance = 0; + double rel_tolerance = 0; + HighsInt priority = 10; + return_status = Highs_addLinearObjective(highs, weight, linear_objective_offset, coefficients, abs_tolerance, rel_tolerance, priority); + assert(return_status == kHighsStatusOk); + + weight = 1e-4; + linear_objective_offset = 0; + 
coefficients[0] = 1; + coefficients[1] = 0; + priority = 0; + return_status = Highs_addLinearObjective(highs, weight, linear_objective_offset, coefficients, abs_tolerance, rel_tolerance, priority); + assert(return_status == kHighsStatusOk); + + return_status = Highs_run(highs); + assert(return_status == kHighsStatusOk); + HighsInt model_status = Highs_getModelStatus(highs); + assert(model_status == kHighsModelStatusOptimal); + + Highs_writeSolutionPretty(highs, ""); + double* col_value = (double*)malloc(sizeof(double) * num_col); + return_status = + Highs_getSolution(highs, col_value, NULL, NULL, NULL); + assertDoubleValuesEqual("col_value[0]", col_value[0], 2); + assertDoubleValuesEqual("col_value[1]", col_value[1], 6); + + Highs_setBoolOptionValue(highs, "blend_multi_objectives", 0); + + if (dev_run) printf("\n***************\nLexicographic 1\n***************\n"); + double weight2[2] = {-1, 1e-4}; + double linear_objective_offset2[2] = {-1, 0}; + double coefficients2[4] = {1, 1, 1, 0}; + double abs_tolerance2[2] = {0, -1}; + double rel_tolerance2[2] = {0, -1}; + HighsInt priority2[2] = {10, 0}; + return_status = Highs_passLinearObjectives(highs, 2, weight2, linear_objective_offset2, coefficients2, abs_tolerance2, rel_tolerance2, priority2); + return_status = Highs_run(highs); + assert(return_status == kHighsStatusOk); + model_status = Highs_getModelStatus(highs); + assert(model_status == kHighsModelStatusOptimal); + Highs_writeSolutionPretty(highs, ""); + return_status = + Highs_getSolution(highs, col_value, NULL, NULL, NULL); + assertDoubleValuesEqual("col_value[0]", col_value[0], 2); + assertDoubleValuesEqual("col_value[1]", col_value[1], 6); + + // weight2[1] = 1e-5; + coefficients2[0] = 1.0001; + abs_tolerance2[0] = 1e-5; + rel_tolerance2[0] = 0.05; + return_status = Highs_passLinearObjectives(highs, 2, weight2, linear_objective_offset2, coefficients2, abs_tolerance2, rel_tolerance2, priority2); + return_status = Highs_run(highs); + assert(return_status == kHighsStatusOk); + model_status = Highs_getModelStatus(highs); + assert(model_status == kHighsModelStatusOptimal); + Highs_writeSolutionPretty(highs, ""); + return_status = + Highs_getSolution(highs, col_value, NULL, NULL, NULL); + assertDoubleValuesEqual("col_value[0]", col_value[0], 4.9); + assertDoubleValuesEqual("col_value[1]", col_value[1], 3.1); + + if (dev_run) printf("\n***************\nLexicographic 2\n***************\n"); + abs_tolerance2[0] = -1; + + return_status = Highs_passLinearObjectives(highs, 2, weight2, linear_objective_offset2, coefficients2, abs_tolerance2, rel_tolerance2, priority2); + return_status = Highs_run(highs); + assert(return_status == kHighsStatusOk); + model_status = Highs_getModelStatus(highs); + assert(model_status == kHighsModelStatusOptimal); + Highs_writeSolutionPretty(highs, ""); + return_status = + Highs_getSolution(highs, col_value, NULL, NULL, NULL); + assertDoubleValuesEqual("col_value[0]", col_value[0], 1.30069); + assertDoubleValuesEqual("col_value[1]", col_value[1], 6.34966); + + Highs_destroy(highs); + free(col_value); +} + /* The horrible C in this causes problems in some of the CI tests, so suppress thius test until the C has been improved @@ -1919,6 +2031,7 @@ int main() { test_ranging(); test_feasibilityRelaxation(); test_getModel(); + test_multiObjective(); return 0; } // test_setSolution(); diff --git a/check/TestCheckSolution.cpp b/check/TestCheckSolution.cpp index e24499f3b6..a4bc79ae87 100644 --- a/check/TestCheckSolution.cpp +++ b/check/TestCheckSolution.cpp @@ -1,4 +1,4 @@ 
-//#include +// #include #include #include "HCheckConfig.h" diff --git a/check/TestFilereader.cpp b/check/TestFilereader.cpp index 915b73a25c..63fe50e8a2 100644 --- a/check/TestFilereader.cpp +++ b/check/TestFilereader.cpp @@ -11,6 +11,7 @@ #include "lp_data/HighsLpUtils.h" const bool dev_run = false; +const double inf = kHighsInf; TEST_CASE("filereader-edge-cases", "[highs_filereader]") { std::string model = ""; @@ -357,3 +358,61 @@ TEST_CASE("filereader-dD2e", "[highs_filereader]") { // double objective_value = highs.getInfo().objective_function_value; // REQUIRE(objective_value == optimal_objective_value); // } + +TEST_CASE("writeLocalModel", "[highs_filereader]") { + Highs h; + h.setOptionValue("output_flag", dev_run); + std::string write_model_file = "foo.mps"; + HighsModel model; + HighsLp& lp = model.lp_; + ; + lp.num_col_ = 2; + lp.num_row_ = 3; + lp.col_cost_ = {8, 10}; + lp.row_lower_ = {7, 12, 6}; + lp.row_upper_ = {inf, inf, inf}; + lp.a_matrix_.start_ = {0, 3, 6}; + lp.a_matrix_.index_ = {0, 1, 2, 0, 1, 2}; + lp.a_matrix_.value_ = {2, 3, 2, 2, 4, 1}; + + if (dev_run) printf("\nModel with no column lower or upper bounds\n"); + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + lp.col_lower_ = {0, 0}; + + if (dev_run) printf("\nModel with no column upper bounds\n"); + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + lp.col_upper_ = {inf, inf}; + + // Model has no dimensions for a_matrix_, but these are set in + // writeLocalModel. + if (dev_run) printf("\nModel with no column or row names\n"); + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kWarning); + lp.col_names_ = {"C0", "C1"}; + lp.row_names_ = {"R0", "R1", "R2"}; + + if (dev_run) printf("\nModel with column and row names\n"); + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kOk); + + // Introduce illegal start + if (dev_run) printf("\nModel with start entry > num_nz\n"); + lp.a_matrix_.start_ = {0, 7, 6}; + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + + // Introduce illegal start + if (dev_run) printf("\nModel with start entry -1\n"); + lp.a_matrix_.start_ = {0, -1, 6}; + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + lp.a_matrix_.start_ = {0, 3, 6}; + + // Introduce illegal index + if (dev_run) printf("\nModel with index entry -1\n"); + lp.a_matrix_.index_ = {0, -1, 2, 0, 1, 2}; + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + + // Introduce illegal index + if (dev_run) printf("\nModel with index entry 3 >= num_row\n"); + lp.a_matrix_.index_ = {0, 1, 3, 0, 1, 2}; + REQUIRE(h.writeLocalModel(model, write_model_file) == HighsStatus::kError); + + std::remove(write_model_file.c_str()); +} diff --git a/check/TestHighsCDouble.cpp b/check/TestHighsCDouble.cpp new file mode 100644 index 0000000000..2f17a40598 --- /dev/null +++ b/check/TestHighsCDouble.cpp @@ -0,0 +1,109 @@ +#include "HCheckConfig.h" +#include "catch.hpp" +#include "util/HighsCDouble.h" +#include "util/HighsRandom.h" + +void testCeil(HighsCDouble x) { + double ceil_x; + double double_x; + ceil_x = double(ceil(x)); + double_x = double(x); + REQUIRE(ceil_x >= double_x); + REQUIRE(ceil(x) >= x); +} + +void testFloor(HighsCDouble x) { + double floor_x; + double double_x; + floor_x = double(floor(x)); + double_x = double(x); + REQUIRE(floor_x <= double_x); + REQUIRE(floor(x) <= x); +} + +TEST_CASE("HighsCDouble-ceil", "[util]") { + HighsCDouble x; + x = -1e-34; + testCeil(x); + x 
= -1e-32; + testCeil(x); + x = -1e-30; + testCeil(x); + x = -1e-23; + testCeil(x); + x = -1e-12; + testCeil(x); + x = -1e-1; + testCeil(x); + x = -0.99; + testCeil(x); + + x = 0.99; + testCeil(x); + x = 1e-1; + testCeil(x); + x = 1e-12; + testCeil(x); + // This and rest failed in #2041 + x = 1e-23; + testCeil(x); + x = 1e-30; + testCeil(x); + x = 1e-32; + testCeil(x); + x = 1e-34; + testCeil(x); + + HighsRandom rand; + for (HighsInt k = 0; k < 1000; k++) { + double man = rand.fraction(); + HighsInt power = 2 - rand.integer(5); + double exp = std::pow(10, power); + x = man * exp; + testCeil(x); + } +} + +TEST_CASE("HighsCDouble-floor", "[util]") { + HighsCDouble x; + + x = 1e-34; + testFloor(x); + x = 1e-32; + testFloor(x); + x = 1e-30; + testFloor(x); + x = 1e-23; + testFloor(x); + x = 1e-12; + testFloor(x); + x = 1e-1; + testFloor(x); + x = 0.99; + testFloor(x); + + x = -0.99; + testFloor(x); + x = -1e-1; + testFloor(x); + x = -1e-12; + testFloor(x); + // This and rest failed in #2041 + x = -1e-23; + testFloor(x); + x = -1e-30; + testFloor(x); + x = -1e-32; + testFloor(x); + x = -1e-34; + testFloor(x); + + HighsRandom rand; + for (HighsInt k = 0; k < 1000; k++) { + double man = rand.fraction(); + HighsInt power = 2 - rand.integer(5); + double exp = std::pow(10, power); + x = man * exp; + testFloor(x); + } +} diff --git a/check/TestHighsIntegers.cpp b/check/TestHighsIntegers.cpp index 7c8de3c9b4..c08b288610 100644 --- a/check/TestHighsIntegers.cpp +++ b/check/TestHighsIntegers.cpp @@ -1,6 +1,8 @@ #include "HCheckConfig.h" #include "catch.hpp" +// #include "util/HighsCDouble.h" #include "util/HighsIntegers.h" +// #include "util/HighsRandom.h" const bool dev_run = false; diff --git a/check/TestLpSolvers.cpp b/check/TestLpSolvers.cpp index 72b94594d0..6d1f879f6b 100644 --- a/check/TestLpSolvers.cpp +++ b/check/TestLpSolvers.cpp @@ -29,6 +29,7 @@ void testDualObjective(const std::string model) { std::max(1.0, std::fabs(primal_objective)); REQUIRE(relative_primal_dual_gap < 1e-12); } + void testSolver(Highs& highs, const std::string solver, IterationCount& default_iteration_count, const HighsInt int_simplex_strategy = 0) { @@ -302,7 +303,7 @@ TEST_CASE("LP-solver", "[highs_lp_solver]") { const HighsInfo& info = highs.getInfo(); REQUIRE(info.num_dual_infeasibilities == 0); - REQUIRE(info.simplex_iteration_count == 472); + // REQUIRE(info.simplex_iteration_count == 472); // differs on macOS HighsModelStatus model_status = highs.getModelStatus(); REQUIRE(model_status == HighsModelStatus::kOptimal); @@ -315,7 +316,7 @@ TEST_CASE("LP-solver", "[highs_lp_solver]") { return_status = highs.run(); REQUIRE(return_status == HighsStatus::kOk); - REQUIRE(info.simplex_iteration_count == 592); + // REQUIRE(info.simplex_iteration_count == 592); // differs on macOS } TEST_CASE("mip-with-lp-solver", "[highs_lp_solver]") { @@ -641,3 +642,40 @@ TEST_CASE("standard-form-lp", "[highs_lp_solver]") { "maximizing\n"); testStandardForm(highs.getLp()); } + +TEST_CASE("simplex-stats", "[highs_lp_solver]") { + HighsStatus return_status; + + Highs h; + const HighsSimplexStats& simplex_stats = h.getSimplexStats(); + h.setOptionValue("output_flag", dev_run); + std::string model_file = + std::string(HIGHS_DIR) + "/check/instances/adlittle.mps"; + REQUIRE(h.readModel(model_file) == HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + REQUIRE(simplex_stats.valid); + REQUIRE(simplex_stats.iteration_count == 0); + REQUIRE(simplex_stats.num_invert == 1); + REQUIRE(simplex_stats.last_invert_num_el > 0); + 
REQUIRE(simplex_stats.last_factored_basis_num_el > 0); + REQUIRE(simplex_stats.col_aq_density == 0); + REQUIRE(simplex_stats.row_ep_density == 0); + REQUIRE(simplex_stats.row_ap_density == 0); + REQUIRE(simplex_stats.row_DSE_density == 0); + if (dev_run) h.reportSimplexStats(stdout); + + h.clearSolver(); + h.setOptionValue("presolve", kHighsOffString); + REQUIRE(h.run() == HighsStatus::kOk); + REQUIRE(simplex_stats.valid); + REQUIRE(simplex_stats.iteration_count > 0); + REQUIRE(simplex_stats.num_invert > 0); + REQUIRE(simplex_stats.last_invert_num_el > 0); + REQUIRE(simplex_stats.last_factored_basis_num_el > 0); + REQUIRE(simplex_stats.col_aq_density > 0); + REQUIRE(simplex_stats.row_ep_density > 0); + REQUIRE(simplex_stats.row_ap_density > 0); + REQUIRE(simplex_stats.row_DSE_density > 0); + if (dev_run) h.reportSimplexStats(stdout); +} diff --git a/check/TestMipSolver.cpp b/check/TestMipSolver.cpp index cfbe1ffbf2..ecb3502316 100644 --- a/check/TestMipSolver.cpp +++ b/check/TestMipSolver.cpp @@ -679,16 +679,16 @@ TEST_CASE("IP-infeasible-unbounded", "[highs_test_mip_solver]") { highs.run(); HighsModelStatus required_model_status; if (k == 0) { - // Presolve off + // Presolve off if (l == 0) { - // MIP solver proves infeasiblilty + // MIP solver proves infeasiblilty required_model_status = HighsModelStatus::kInfeasible; } else { - // Relaxation is unbounded, but origin is feasible + // Relaxation is unbounded, but origin is feasible required_model_status = HighsModelStatus::kUnbounded; } } else { - // Presolve on, and identifies primal infeasible or unbounded + // Presolve on, and identifies primal infeasible or unbounded required_model_status = HighsModelStatus::kUnboundedOrInfeasible; } REQUIRE(highs.getModelStatus() == required_model_status); diff --git a/check/TestMultiObjective.cpp b/check/TestMultiObjective.cpp new file mode 100644 index 0000000000..1a15e8d9d2 --- /dev/null +++ b/check/TestMultiObjective.cpp @@ -0,0 +1,196 @@ +#include "Highs.h" +#include "catch.hpp" + +const bool dev_run = false; + +bool smallDoubleDifference(double v0, double v1) { + double difference = std::fabs(v0 - v1); + // printf("smallDoubleDifference = %g\n", difference); + return difference < 1e-4; +} + +TEST_CASE("multi-objective", "[util]") { + HighsLp lp; + lp.num_col_ = 2; + lp.num_row_ = 3; + lp.col_cost_ = {0, 0}; + lp.col_lower_ = {0, 0}; + lp.col_upper_ = {kHighsInf, kHighsInf}; + lp.row_lower_ = {-kHighsInf, -kHighsInf, -kHighsInf}; + lp.row_upper_ = {18, 8, 14}; + lp.a_matrix_.start_ = {0, 3, 6}; + lp.a_matrix_.index_ = {0, 1, 2, 0, 1, 2}; + lp.a_matrix_.value_ = {3, 1, 1, 1, 1, 2}; + Highs h; + h.setOptionValue("output_flag", dev_run); + + for (HighsInt k = 0; k < 2; k++) { + // Pass 0 is continuous; pass 1 integer + if (dev_run) + printf( + "\n******************\nPass %d: var type is %s\n******************\n", + int(k), k == 0 ? "continuous" : "integer"); + for (HighsInt l = 0; l < 2; l++) { + // Pass 0 is with unsigned weights and coefficients + double obj_mu = l == 0 ? 
1 : -1; + if (dev_run) + printf( + "\n******************\nPass %d: objective multiplier is " + "%g\n******************\n", + int(l), obj_mu); + + if (k == 0) { + lp.integrality_.clear(); + } else if (k == 1) { + lp.integrality_ = {HighsVarType::kInteger, HighsVarType::kInteger}; + } + h.passModel(lp); + + h.setOptionValue("blend_multi_objectives", true); + + HighsLinearObjective linear_objective; + std::vector<HighsLinearObjective> linear_objectives; + REQUIRE(h.clearLinearObjectives() == HighsStatus::kOk); + + // Begin with an illegal linear objective + if (dev_run) printf("\nPass illegal linear objective\n"); + linear_objective.weight = -obj_mu; + linear_objective.offset = -obj_mu; + linear_objective.coefficients = {obj_mu * 1, obj_mu * 1, obj_mu * 0}; + linear_objective.abs_tolerance = 0.0; + linear_objective.rel_tolerance = 0.0; + REQUIRE(h.addLinearObjective(linear_objective) == HighsStatus::kError); + // Now legalise the linear objective so LP has nonunique optimal + // solutions on the line joining (2, 6) and (5, 3) + if (dev_run) printf("\nPass legal linear objective\n"); + linear_objective.coefficients = {obj_mu * 1, obj_mu * 1}; + REQUIRE(h.addLinearObjective(linear_objective) == HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + REQUIRE(smallDoubleDifference(h.getInfo().objective_function_value, -7)); + // Save the linear objective for the next + linear_objectives.push_back(linear_objective); + + // Add a second linear objective with a very small minimization + // weight that should push the optimal solution to (2, 6) + if (dev_run) printf("\nPass second linear objective\n"); + linear_objective.weight = obj_mu * 1e-4; + linear_objective.offset = 0; + linear_objective.coefficients = {obj_mu * 1, obj_mu * 0}; + REQUIRE(h.addLinearObjective(linear_objective) == HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 2)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 6)); + linear_objectives.push_back(linear_objective); + + if (dev_run) printf("\nClear and pass two linear objectives\n"); + REQUIRE(h.clearLinearObjectives() == HighsStatus::kOk); + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + HighsStatus::kOk); + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 2)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 6)); + + // Set illegal priorities - that can be passed OK since + // blend_multi_objectives = true + if (dev_run) + printf( + "\nSetting priorities that will be illegal when using " + "lexicographic " + "optimization\n"); + linear_objectives[0].priority = 0; + linear_objectives[1].priority = 0; + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + HighsStatus::kOk); + + // Now test lexicographic optimization + h.setOptionValue("blend_multi_objectives", false); + + if (dev_run) printf("\nLexicographic using illegal priorities\n"); + REQUIRE(h.run() == HighsStatus::kError); + + if (dev_run) + printf( + "\nSetting priorities that are illegal now blend_multi_objectives " + "= " + "false\n"); + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + HighsStatus::kError); + + if (dev_run) + printf( + "\nSetting legal priorities for blend_multi_objectives = false\n"); + linear_objectives[0].priority = 10; + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + 
HighsStatus::kOk); + + if (dev_run) + printf("\nLexicographic using existing multi objective data\n"); + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 2)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 6)); + + // Back to blending + h.setOptionValue("blend_multi_objectives", true); + // h.setOptionValue("output_flag", true); + REQUIRE(h.clearLinearObjectives() == HighsStatus::kOk); + linear_objectives[0].coefficients = {obj_mu * 1.0001, obj_mu * 1}; + linear_objectives[0].abs_tolerance = 1e-5; + linear_objectives[0].rel_tolerance = 0.05; + linear_objectives[1].weight = obj_mu * 1e-3; + if (dev_run) + printf( + "\nBlending: first solve objective just giving unique optimal " + "solution\n"); + REQUIRE(h.passLinearObjectives(1, linear_objectives.data()) == + HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + + // Back to lexicographic optimization + h.setOptionValue("blend_multi_objectives", false); + + if (dev_run) printf("\nLexicographic using non-trivial tolerances\n"); + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + + if (k == 0) { + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 4.9)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 3.1)); + } else { + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 5)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 3)); + } + + linear_objectives[0].abs_tolerance = kHighsInf; + + REQUIRE(h.passLinearObjectives(2, linear_objectives.data()) == + HighsStatus::kOk); + + REQUIRE(h.run() == HighsStatus::kOk); + h.writeSolution("", kSolutionStylePretty); + + // printf("Solution = [%23.18g, %23.18g]\n", + // h.getSolution().col_value[0], h.getSolution().col_value[1]); + if (k == 0) { + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 1.30069)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 6.34966)); + } else { + REQUIRE(smallDoubleDifference(h.getSolution().col_value[0], 2)); + REQUIRE(smallDoubleDifference(h.getSolution().col_value[1], 6)); + } + } + } +} diff --git a/check/TestPresolve.cpp b/check/TestPresolve.cpp index e4263c010e..7d2e5d9604 100644 --- a/check/TestPresolve.cpp +++ b/check/TestPresolve.cpp @@ -616,7 +616,7 @@ TEST_CASE("presolve-slacks", "[highs_test_presolve]") { lp.a_matrix_.index_ = {0, 0}; lp.a_matrix_.value_ = {1, 1}; Highs h; - // h.setOptionValue("output_flag", dev_run); + h.setOptionValue("output_flag", dev_run); REQUIRE(h.passModel(lp) == HighsStatus::kOk); REQUIRE(h.presolve() == HighsStatus::kOk); REQUIRE(h.getPresolvedLp().num_col_ == 0); diff --git a/docs/make.jl b/docs/make.jl index c1f6bc70b9..93947c773c 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -63,10 +63,14 @@ Documenter.makedocs( "structures/classes/HighsSparseMatrix.md", "structures/classes/HighsLp.md", "structures/classes/HighsHessian.md", - "structures/classes/HighsModel.md", - "structures/classes/HighsSolution.md", - "structures/classes/HighsBasis.md", - "structures/classes/HighsInfo.md", + "structures/classes/HighsModel.md" + ], + "Structures" => Any[ + "structures/structs/index.md", + "structures/structs/HighsSolution.md", + "structures/structs/HighsBasis.md", + "structures/structs/HighsInfo.md", + 
"structures/structs/HighsLinearObjective.md" ], ], "Callbacks" => "callbacks.md", diff --git a/docs/src/guide/further.md b/docs/src/guide/further.md index 3597928f5b..ed111f6682 100644 --- a/docs/src/guide/further.md +++ b/docs/src/guide/further.md @@ -104,3 +104,65 @@ HiGHS. Note that this does not affect how the incumbent model is solved. There are two corresponding [postsolve](@ref Presolve/postsolve) methods, according to whether there are just solution values, or also a basis. + +## [Multi-objective optimization](@id guide-multi-objective-optimization) + +Users can specify multiple linear objectives with respect to which +HiGHS will optimize by either blending them, or by performing +lexicographic optimization according to the truth of the +[blend\_multi\_objectives](@ref blend_multi_objectives) option. Each +linear objective is represented by the following data, held in the +[HighsLinearObjective](@ref HighsLinearObjective) structure + +- weight: Scalar of type double - The weight of this objective when blending +- offset: Scalar of type double - The offset of this objective +- coefficients: Vector of type double - The coefficients of this objective +- abs\_tolerance: Scalar of type double - The absolute tolerance on this objective when performing lexicographic optimization +- rel\_tolerance: Scalar of type double - The relative tolerance on this objective when performing lexicographic optimization +- priority: Scalar of type HighsInt - The priority of this objective when performing lexicographic optimization + +### Methods + +Multi-objective optimization in HiGHS is defined by the following methods + +- [addLinearObjective](@ref Multi-objective-optimization] - Add a single `HighsLinearObjective` instance to any already stored in HiGHS +- [clearLinearObjectives](@ref Multi-objective-optimization] - Clears any linear objectives stored in HiGHS + +When there is at least one `HighsLinearObjective` instance in HiGHS, +the `col_cost_` data in the incumbent model is ignored. + +### Blending multiple linear objectives + +When [blend\_multi\_objectives](@ref blend_multi_objectives) is `true`, +as it is by default, any `HighsLinearObjective` instances will be +combined according to the `weight` values, and the resulting objective +will be minimized. Hence, any objectives that should be maximized +within the combination must have a negative `weight` value. + +### Lexicographic optimization of multiple linear objectives + +When [blend\_multi\_objectives](@ref blend_multi_objectives) is `false`, +HiGHS will optimize lexicographically with respect to any +`HighsLinearObjective` instances. This is carried out as follows, according to the +`priority` values in `HighsLinearObjective` instances. Note that _all +priority values must be distinct_. + +* Minimize/maximize with respect to the linear objective of highest priority value, according to whether its `weight` is positive/negative + +* Add a constraint to the model so that the value of the linear objective of highest priority satsifies a bound given by the values of `abs_tolerance` and/or `rel_tolerance`. 
+ + If the objective was minimized to a value ``f^*\ge0``, then the constraint ensures that this objective value is no greater than ``\min(f^*+abs\_tolerance,~f^*\times[1+rel\_tolerance]).`` + + If the objective was minimized to a value ``f^*<0``, then the constraint ensures that this objective value is no greater than ``\min(f^*+abs\_tolerance,~f^*\times[1-rel\_tolerance]).`` + + If the objective was maximized to a value ``f^*\ge0``, then the constraint ensures that this objective value is no less than ``\max(f^*-abs\_tolerance,~f^*\times[1-rel\_tolerance]).`` + + If the objective was maximized to a value ``f^*<0``, then the constraint ensures that this objective value is no less than ``\max(f^*-abs\_tolerance,~f^*\times[1+rel\_tolerance]).`` + +* Minimize/maximize with respect to the linear objective of next highest priority, and then add a corresponding objective constraint to the model, repeating until optimization with respect to the linear objective of lowest priority has taken place. + +Note + +* Negative values of `abs_tolerance` and `rel_tolerance` will be ignored. This is a convenient way of "switching off" a bounding technique that is not of interest. +* When the model is continuous, no dual information will be returned if there is more than one linear objective. + + diff --git a/docs/src/interfaces/csharp.md b/docs/src/interfaces/csharp.md index e9b24f1946..bfc2251f65 100644 --- a/docs/src/interfaces/csharp.md +++ b/docs/src/interfaces/csharp.md @@ -17,7 +17,7 @@ The nuget package Highs.Native is on https://www.nuget.org, at https://www.nuget It can be added to your C# project with `dotnet` ```shell -dotnet add package Highs.Native --version 1.8.1 +dotnet add package Highs.Native --version 1.9.0 ``` The nuget package contains runtime libraries for diff --git a/docs/src/interfaces/python/example-py.md b/docs/src/interfaces/python/example-py.md index 50d40c59e7..fd4eef038b 100644 --- a/docs/src/interfaces/python/example-py.md +++ b/docs/src/interfaces/python/example-py.md @@ -237,5 +237,11 @@ print('Basis validity = ', h.basisValidityToString(info.basis_validity)) * `presolveRuleTypeToString` * `postsolve` - - +## Multi-objective optimization + +* `addLinearObjective` +* `clearLinearObjectives` + + + + diff --git a/docs/src/options/definitions.md b/docs/src/options/definitions.md index 8055067601..f4e932523c 100644 --- a/docs/src/options/definitions.md +++ b/docs/src/options/definitions.md @@ -353,3 +353,8 @@ - Range: {0, 2147483647} - Default: 4000 +## blend\_multi\_objectives +- Blend multiple objectives or apply lexicographically +- Type: boolean +- Default: "true" + diff --git a/docs/src/structures/classes/index.md b/docs/src/structures/classes/index.md index f2f5ed4326..9277bc5ef8 100644 --- a/docs/src/structures/classes/index.md +++ b/docs/src/structures/classes/index.md @@ -6,8 +6,5 @@ The data members of fundamental classes in HiGHS are defined in this section. * [HighsLp](@ref) * [HighsHessian](@ref) * [HighsModel](@ref) - * [HighsSolution](@ref) - * [HighsBasis](@ref) - * [HighsInfo](@ref) Class data members for internal use only are not documented.
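The multi-objective guide added above describes the `HighsLinearObjective` data and the `addLinearObjective`/`clearLinearObjectives` methods only in prose. The sketch below shows how the blended case might look from `highspy`; it mirrors the LP and the two objectives used in `check/TestMultiObjective.cpp` and assumes the 1.9.0 Python bindings added in `src/highs_bindings.cpp` (in particular, that `coefficients` accepts a Python list).

```python
# Minimal highspy sketch of blended multi-objective optimization (assumes
# the HighsLinearObjective bindings introduced in this release).
import numpy as np
import highspy

h = highspy.Highs()
h.setOptionValue("output_flag", False)

inf = highspy.kHighsInf
lp = highspy.HighsLp()
lp.num_col_ = 2
lp.num_row_ = 3
lp.col_cost_ = np.array([0, 0], dtype=np.double)  # ignored once linear objectives exist
lp.col_lower_ = np.array([0, 0], dtype=np.double)
lp.col_upper_ = np.array([inf, inf], dtype=np.double)
lp.row_lower_ = np.array([-inf, -inf, -inf], dtype=np.double)
lp.row_upper_ = np.array([18, 8, 14], dtype=np.double)
lp.a_matrix_.format_ = highspy.MatrixFormat.kColwise
lp.a_matrix_.start_ = np.array([0, 3, 6])
lp.a_matrix_.index_ = np.array([0, 1, 2, 0, 1, 2])
lp.a_matrix_.value_ = np.array([3, 1, 1, 1, 1, 2], dtype=np.double)
h.passModel(lp)

h.clearLinearObjectives()

# Primary objective: maximize x0 + x1 (negative weight => maximization when blending)
obj1 = highspy.HighsLinearObjective()
obj1.weight = -1.0
obj1.offset = -1.0
obj1.coefficients = [1.0, 1.0]
obj1.abs_tolerance = 0.0
obj1.rel_tolerance = 0.0
obj1.priority = 10
h.addLinearObjective(obj1)

# Secondary objective: a small weight on x0 breaks the tie along x0 + x1 = 8
obj2 = highspy.HighsLinearObjective()
obj2.weight = 1e-4
obj2.offset = 0.0
obj2.coefficients = [1.0, 0.0]
obj2.abs_tolerance = 0.0
obj2.rel_tolerance = 0.0
obj2.priority = 0
h.addLinearObjective(obj2)

# blend_multi_objectives is true by default, so the weighted objectives are
# combined; setting it to False (with distinct priorities) would instead
# trigger lexicographic optimization.
h.run()
sol = h.getSolution()
print(f"Solution: ({sol.col_value[0]}, {sol.col_value[1]})")  # expect (2, 6)
```

As in the C++ and C API tests added in this diff, the blended solve should return the vertex (2, 6) of the face on which the primary objective is optimal.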
diff --git a/docs/src/structures/classes/HighsBasis.md b/docs/src/structures/structs/HighsBasis.md similarity index 96% rename from docs/src/structures/classes/HighsBasis.md rename to docs/src/structures/structs/HighsBasis.md index bfc71c7efd..6bac592bd5 100644 --- a/docs/src/structures/classes/HighsBasis.md +++ b/docs/src/structures/structs/HighsBasis.md @@ -1,6 +1,6 @@ # HighsBasis -The basis of a model is communicated via an instance of the HighsBasis class +The basis of a model is communicated via an instance of the HighsBasis structure - valid: Scalar of type bool - Indicates whether the basis is valid - col\_status: Vector of type [HighsBasisStatus](@ref) - Comparison with [HighsBasisStatus](@ref) gives the basis status of a column diff --git a/docs/src/structures/classes/HighsInfo.md b/docs/src/structures/structs/HighsInfo.md similarity index 98% rename from docs/src/structures/classes/HighsInfo.md rename to docs/src/structures/structs/HighsInfo.md index f0ddebee4f..9a3217adc3 100644 --- a/docs/src/structures/classes/HighsInfo.md +++ b/docs/src/structures/structs/HighsInfo.md @@ -1,6 +1,6 @@ # HighsInfo -Scalar information about a solved model is communicated via an instance of the HighsInfo class +Scalar information about a solved model is communicated via an instance of the HighsInfo structure ## valid - Indicates whether the values in a HighsInfo instance are valid diff --git a/docs/src/structures/structs/HighsLinearObjective.md b/docs/src/structures/structs/HighsLinearObjective.md new file mode 100644 index 0000000000..9cc9f09d12 --- /dev/null +++ b/docs/src/structures/structs/HighsLinearObjective.md @@ -0,0 +1,11 @@ +# HighsLinearObjective + +A linear objective for a model is communicated via an instance of the HighsLinearObjective structure + +- weight: Scalar of type double - The weight of this objective when blending +- offset: Scalar of type double - The offset of this objective +- coefficients: Vector of type double - The coefficients of this objective +- abs\_tolerance: Scalar of type double - The absolute tolerance on this objective when performing lexicographic optimization +- rel\_tolerance: Scalar of type double - The relative tolerance on this objective when performing lexicographic optimization +- priority: Scalar of type HighsInt - The priority of this objective when performing lexicographic optimization + diff --git a/docs/src/structures/classes/HighsSolution.md b/docs/src/structures/structs/HighsSolution.md similarity index 96% rename from docs/src/structures/classes/HighsSolution.md rename to docs/src/structures/structs/HighsSolution.md index 78fc48a959..5b9dbf6a73 100644 --- a/docs/src/structures/classes/HighsSolution.md +++ b/docs/src/structures/structs/HighsSolution.md @@ -1,6 +1,6 @@ # HighsSolution -The solution of a model is communicated via an instance of the HighsSolution class +The solution of a model is communicated via an instance of the HighsSolution structure - value\_valid: Scalar of type bool - Indicates whether the column and row values are valid - dual\_valid: Scalar of type bool - Indicates whether the column and row [duals](@ref Dual-values) are valid diff --git a/docs/src/structures/structs/index.md b/docs/src/structures/structs/index.md new file mode 100644 index 0000000000..d8c763c2b3 --- /dev/null +++ b/docs/src/structures/structs/index.md @@ -0,0 +1,10 @@ +# [Overview](@id structs-overview) + +The data members of fundamental structs in HiGHS are defined in this section. 
+ + * [HighsSolution](@ref) + * [HighsBasis](@ref) + * [HighsInfo](@ref) + * [HighsLinearObjective](@ref) + +Structure data members for internal use only are not documented. diff --git a/examples/call_highs_from_python.py b/examples/call_highs_from_python.py index 2ff88f281b..61e6356f3e 100644 --- a/examples/call_highs_from_python.py +++ b/examples/call_highs_from_python.py @@ -493,4 +493,8 @@ def user_interrupt_callback( print("row_bound:", iis.row_bound) print("col_index:", iis.col_index) -print("col_bound:", iis.col_bound) \ No newline at end of file +print("col_bound:", iis.col_bound) + +# ~~~ +# Clear so that incumbent model is empty +h.clear() diff --git a/examples/knapsack.py b/examples/knapsack.py new file mode 100644 index 0000000000..992cac793d --- /dev/null +++ b/examples/knapsack.py @@ -0,0 +1,55 @@ +import numpy as np +import highspy + +h = highspy.Highs() +h.setOptionValue("output_flag", False); +h.setOptionValue("presolve", "off"); +inf = highspy.kHighsInf +lp = highspy.HighsLp() +lp.sense_ = highspy.ObjSense.kMaximize +lp.num_col_ = 5 +lp.num_row_ = 1 +lp.col_cost_ = np.array([8, 5, 3, 11, 7], dtype=np.double) +lp.col_lower_ = np.array([0, 0, 0, 0, 0], dtype=np.double) +lp.col_upper_ = np.array([1, 1, 1, 1, 1], dtype=np.double) +lp.row_lower_ = np.array([-inf], dtype=np.double) +lp.row_upper_ = np.array([11], dtype=np.double) +lp.a_matrix_.format_ = highspy.MatrixFormat.kRowwise +lp.a_matrix_.start_ = np.array([0, 5]) +lp.a_matrix_.index_ = np.array([0, 1, 2, 3, 4]) +lp.a_matrix_.value_ = np.array([4, 3, 1, 5, 4], dtype=np.double) +lp.integrality_ = np.array([highspy.HighsVarType.kInteger, highspy.HighsVarType.kInteger, highspy.HighsVarType.kInteger, highspy.HighsVarType.kInteger, highspy.HighsVarType.kInteger]) +h.passModel(lp) + +h.run() +solution = h.getSolution() +print(f"Solution: ({solution.col_value[0]}, {solution.col_value[1]}, {solution.col_value[2]}, {solution.col_value[3]}, {solution.col_value[4]})") + +# Solution is [1, 0, 1, 1, 0] + +# Illustrate setSolution + +# First by passing back the optimal solution + +h.clearSolver() +h.setSolution(solution) +h.run() +solution = h.getSolution() +print(f"Solution: ({solution.col_value[0]}, {solution.col_value[1]}, {solution.col_value[2]}, {solution.col_value[3]}, {solution.col_value[4]})") + +# Now passing back the optimal values of two variables as a sparse solution +h.clearSolver() +index = np.array([0, 3]) +value = np.array([1, 1], dtype=np.double) +h.setSolution(2, index, value) +h.run() +solution = h.getSolution() +print(f"Solution: ({solution.col_value[0]}, {solution.col_value[1]}, {solution.col_value[2]}, {solution.col_value[3]}, {solution.col_value[4]})") + +# Test passing back the optimal value of one variable, and a non-optimal value of another, as a sparse solution in untyped array +h.clearSolver() +h.setSolution(2, np.array([0, 4]), np.array([1, 1])) +h.run() +solution = h.getSolution() +print(f"Solution: ({solution.col_value[0]}, {solution.col_value[1]}, {solution.col_value[2]}, {solution.col_value[3]}, {solution.col_value[4]})") + diff --git a/meson.build b/meson.build index de7119ea66..62a3825f55 100644 --- a/meson.build +++ b/meson.build @@ -1,5 +1,5 @@ project('highs', 'cpp', 'c', - version : '1.8.1', + version : '1.9.0', meson_version: '>= 1.1.0', default_options : ['warning_level=1', 'cpp_std=c++17', diff --git a/nuget/README.md b/nuget/README.md index 68d2034802..177ed07cda 100644 --- a/nuget/README.md +++ b/nuget/README.md @@ -7,7 +7,7 @@ The nuget package Highs.Native is on https://www.nuget.org, at 
https://www.nuget It can be added to your C# project with `dotnet` ```shell -dotnet add package Highs.Native --version 1.8.1 +dotnet add package Highs.Native --version 1.9.0 ``` The nuget package contains runtime libraries for diff --git a/pyproject.toml b/pyproject.toml index 0a1dee0f11..1f625d2bad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -8,7 +8,7 @@ build-backend = "scikit_build_core.build" [project] name = "highspy" -version = "1.8.1" +version = "1.9.0" description = "A thin set of pybind11 wrappers to HiGHS" authors = [{ name = "HiGHS developers", email = "highsopt@gmail.com" }] readme = "README.md" @@ -74,9 +74,17 @@ sdist.exclude = [ ".clang-format", "__setup.py", "BUILD.bazel", - "meson*", + "**meson**", "MODS.md", "WORKSPACE", + "nuget/", + "nuget/README.md", + "src/*.bazel*", + "src/*.meson*", + "interfaces/*csharp*", + "interfaces/*fortran*", + "flake.*", + "highs.pc.in" ] @@ -90,9 +98,9 @@ sdist.exclude = [ # # The versions of Ninja to allow. If Ninja is not present on the system or does # # not pass this specifier, it will be downloaded via PyPI if possible. An empty # # string will disable this check. -# ninja.version = ">=1.5" +# ninja.version = ">=1.5" -# # If CMake is not present on the system or is older required, it will be +# # If Ninja is not present on the system or is older required, it will be # # downloaded via PyPI if possible. An empty string will disable this check. # ninja.make-fallback = true diff --git a/src/Highs.h b/src/Highs.h index 34d1044f5f..70d800a5de 100644 --- a/src/Highs.h +++ b/src/Highs.h @@ -150,6 +150,24 @@ class Highs { HighsStatus passHessian(const HighsInt dim, const HighsInt num_nz, const HighsInt format, const HighsInt* start, const HighsInt* index, const double* value); + /** + * @brief Pass multiple linear objectives for the incumbent model + */ + HighsStatus passLinearObjectives( + const HighsInt num_linear_objective, + const HighsLinearObjective* linear_objective); + + /** + * @brief Add a linear objective for the incumbent model + */ + HighsStatus addLinearObjective(const HighsLinearObjective& linear_objective, + const HighsInt iObj = -1); + + /** + * @brief Clear the multiple linear objective data + */ + HighsStatus clearLinearObjectives(); + /** * @brief Pass a column name to the incumbent model */ @@ -183,7 +201,7 @@ class Highs { HighsStatus presolve(); /** - * @brief Solve the incumbent model according to the specified options + * @brief Run the solver, accounting for any multiple objective */ HighsStatus run(); @@ -1197,6 +1215,14 @@ class Highs { static void resetGlobalScheduler(bool blocking = false); // Start of advanced methods for HiGHS MIP solver + + const HighsSimplexStats& getSimplexStats() const { + return ekk_instance_.getSimplexStats(); + } + void reportSimplexStats(FILE* file) const { + ekk_instance_.reportSimplexStats(file); + } + /** * @brief Get the hot start basis data from the most recent simplex * solve. 
Advanced method: for HiGHS MIP solver @@ -1389,6 +1415,8 @@ class Highs { ICrashInfo icrash_info_; HighsModel model_; + std::vector multi_linear_objective_; + HighsModel presolved_model_; HighsTimer timer_; @@ -1397,7 +1425,6 @@ class Highs { HighsInfo info_; HighsRanging ranging_; HighsIis iis_; - std::vector saved_objective_and_solution_; HighsPresolveStatus model_presolve_status_ = @@ -1424,6 +1451,8 @@ class Highs { bool written_log_header = false; + HighsStatus solve(); + void exactResizeModel() { this->model_.lp_.exactResize(); this->model_.hessian_.exactResize(); @@ -1600,6 +1629,10 @@ class Highs { HighsInt* iis_col_index, HighsInt* iis_row_index, HighsInt* iis_col_bound, HighsInt* iis_row_bound); + HighsStatus returnFromLexicographicOptimization( + const HighsStatus return_status, HighsInt original_lp_num_row); + HighsStatus multiobjectiveSolve(); + bool aFormatOk(const HighsInt num_nz, const HighsInt format); bool qFormatOk(const HighsInt num_nz, const HighsInt format); void clearZeroHessian(); @@ -1623,6 +1656,10 @@ class Highs { const bool constraint, const double ill_conditioning_bound); bool infeasibleBoundsOk(); + bool validLinearObjective(const HighsLinearObjective& linear_objective, + const HighsInt iObj) const; + bool hasRepeatedLinearObjectivePriorities( + const HighsLinearObjective* linear_objective = nullptr) const; }; // Start of deprecated methods not in the Highs class diff --git a/src/highs_bindings.cpp b/src/highs_bindings.cpp index b8f7e249cf..5b4069a96c 100644 --- a/src/highs_bindings.cpp +++ b/src/highs_bindings.cpp @@ -159,6 +159,10 @@ HighsStatus highs_passHessianPointers(Highs* h, const HighsInt dim, q_value_ptr); } +HighsStatus highs_addLinearObjective(Highs* h, const HighsLinearObjective& linear_objective) { + return h->addLinearObjective(linear_objective, -1); +} + HighsStatus highs_postsolve(Highs* h, const HighsSolution& solution, const HighsBasis& basis) { return h->postsolve(solution, basis); @@ -638,7 +642,7 @@ HighsStatus highs_setCallback( data.ptr()); } -PYBIND11_MODULE(_core, m) { +PYBIND11_MODULE(_core, m, py::mod_gil_not_used()) { // To keep a smaller diff, for reviewers, the declarations are not moved, but // keep in mind: // C++ enum classes :: don't need .export_values() @@ -939,6 +943,8 @@ PYBIND11_MODULE(_core, m) { .def("passModel", &highs_passLpPointers) .def("passHessian", &highs_passHessian) .def("passHessian", &highs_passHessianPointers) + .def("addLinearObjective", &highs_addLinearObjective) + .def("clearLinearObjectives", &Highs::clearLinearObjectives) .def("passColName", &Highs::passColName) .def("passRowName", &Highs::passRowName) .def("readModel", &Highs::readModel) @@ -1148,6 +1154,14 @@ PYBIND11_MODULE(_core, m) { .def(py::init<>()) .def_readwrite("simplex_time", &HighsIisInfo::simplex_time) .def_readwrite("simplex_iterations", &HighsIisInfo::simplex_iterations); + py::class_(m, "HighsLinearObjective") + .def(py::init<>()) + .def_readwrite("weight", &HighsLinearObjective::weight) + .def_readwrite("offset", &HighsLinearObjective::offset) + .def_readwrite("coefficients", &HighsLinearObjective::coefficients) + .def_readwrite("abs_tolerance", &HighsLinearObjective::abs_tolerance) + .def_readwrite("rel_tolerance", &HighsLinearObjective::rel_tolerance) + .def_readwrite("priority", &HighsLinearObjective::priority); // constants m.attr("kHighsInf") = kHighsInf; m.attr("kHighsIInf") = kHighsIInf; diff --git a/src/highspy/__init__.py b/src/highspy/__init__.py index 9e3c03a0e1..e3044e8ccb 100644 --- a/src/highspy/__init__.py +++ 
b/src/highspy/__init__.py @@ -29,6 +29,7 @@ HighsRanging, kHighsInf, kHighsIInf, + HighsLinearObjective, HIGHS_VERSION_MAJOR, HIGHS_VERSION_MINOR, HIGHS_VERSION_PATCH, diff --git a/src/highspy/highs.py b/src/highspy/highs.py index 80255b15d6..af42594be9 100644 --- a/src/highspy/highs.py +++ b/src/highspy/highs.py @@ -17,7 +17,8 @@ ) # backwards typing support information for HighspyArray -if sys.version_info >= (3, 9): +np_version = tuple(map(int, np.__version__.split('.'))) +if sys.version_info >= (3, 9) and np_version >= (1,22,0): ndarray_object_type = np.ndarray[Any, np.dtype[np.object_]] else: ndarray_object_type = np.ndarray diff --git a/src/interfaces/highs_c_api.cpp b/src/interfaces/highs_c_api.cpp index 8dad0e5216..3c464796e2 100644 --- a/src/interfaces/highs_c_api.cpp +++ b/src/interfaces/highs_c_api.cpp @@ -291,6 +291,58 @@ HighsInt Highs_passHessian(void* highs, const HighsInt dim, ->passHessian(dim, num_nz, format, start, index, value); } +HighsInt Highs_passLinearObjectives(const void* highs, + const HighsInt num_linear_objective, + const double* weight, const double* offset, + const double* coefficients, + const double* abs_tolerance, + const double* rel_tolerance, + const HighsInt* priority) { + HighsInt status = Highs_clearLinearObjectives(highs); + if (status != kHighsStatusOk) return status; + HighsLinearObjective linear_objective; + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) { + HighsInt num_col = Highs_getNumCol(highs); + linear_objective.weight = weight[iObj]; + linear_objective.offset = offset[iObj]; + for (HighsInt iCol = 0; iCol < num_col; iCol++) + linear_objective.coefficients.push_back( + coefficients[iObj * num_col + iCol]); + linear_objective.abs_tolerance = abs_tolerance[iObj]; + linear_objective.rel_tolerance = rel_tolerance[iObj]; + linear_objective.priority = priority[iObj]; + linear_objective.weight = weight[iObj]; + status = + HighsInt(((Highs*)highs)->addLinearObjective(linear_objective, iObj)); + if (status != kHighsStatusOk) return status; + linear_objective.coefficients.clear(); + } + return kHighsStatusOk; +} + +HighsInt Highs_addLinearObjective(const void* highs, const double weight, + const double offset, + const double* coefficients, + const double abs_tolerance, + const double rel_tolerance, + const HighsInt priority) { + HighsLinearObjective linear_objective; + HighsInt num_col = Highs_getNumCol(highs); + linear_objective.weight = weight; + linear_objective.offset = offset; + for (HighsInt iCol = 0; iCol < num_col; iCol++) + linear_objective.coefficients.push_back(coefficients[iCol]); + linear_objective.abs_tolerance = abs_tolerance; + linear_objective.rel_tolerance = rel_tolerance; + linear_objective.priority = priority; + linear_objective.weight = weight; + return HighsInt(((Highs*)highs)->addLinearObjective(linear_objective)); +} + +HighsInt Highs_clearLinearObjectives(const void* highs) { + return HighsInt(((Highs*)highs)->clearLinearObjectives()); +} + HighsInt Highs_passRowName(const void* highs, const HighsInt row, const char* name) { return (HighsInt)((Highs*)highs)->passRowName(row, std::string(name)); diff --git a/src/interfaces/highs_c_api.h b/src/interfaces/highs_c_api.h index a59ec64a17..0feb39bf35 100644 --- a/src/interfaces/highs_c_api.h +++ b/src/interfaces/highs_c_api.h @@ -557,6 +557,72 @@ HighsInt Highs_passHessian(void* highs, const HighsInt dim, const HighsInt* start, const HighsInt* index, const double* value); +/** + * Passes multiple linear objective data to HiGHS, clearing any such + * data already in 
HiGHS + * + * @param highs A pointer to the Highs instance. + * @param weight A pointer to the weights of the linear objective, with + * its positive/negative sign determining whether it is + * minimized or maximized during lexicographic optimization + * @param offset A pointer to the objective offsets + * @param coefficients A pointer to the objective coefficients + * @param abs_tolerance A pointer to the absolute tolerances used when + * constructing objective constraints during lexicographic + * optimization + * @param rel_tolerance A pointer to the relative tolerances used when + * constructing objective constraints during lexicographic + * optimization + * @param priority A pointer to the priorities of the objectives during + * lexicographic optimization + * + * @returns A `kHighsStatus` constant indicating whether the call succeeded. + */ + +HighsInt Highs_passLinearObjectives(const void* highs, + const HighsInt num_linear_objective, + const double* weight, const double* offset, + const double* coefficients, + const double* abs_tolerance, + const double* rel_tolerance, + const HighsInt* priority); + +/** + * Adds linear objective data to HiGHS + * + * @param highs A pointer to the Highs instance. + * @param weight The weight of the linear objective, with its + * positive/negative sign determining whether it is + * minimized or maximized during lexicographic + * optimization + * @param offset The objective offset + * @param coefficients A pointer to the objective coefficients + * @param abs_tolerance The absolute tolerance used when constructing an + * objective constraint during lexicographic optimization + * @param rel_tolerance The relative tolerance used when constructing an + * objective constraint during lexicographic optimization + * @param priority The priority of this objective during lexicographic + * optimization + * + * @returns A `kHighsStatus` constant indicating whether the call succeeded. + */ + +HighsInt Highs_addLinearObjective(const void* highs, const double weight, + const double offset, + const double* coefficients, + const double abs_tolerance, + const double rel_tolerance, + const HighsInt priority); + +/** + * Clears any multiple linear objective data in HiGHS + * + * @param highs A pointer to the Highs instance. + * + * @returns A `kHighsStatus` constant indicating whether the call succeeded. + */ + +HighsInt Highs_clearLinearObjectives(const void* highs); /** * Pass the name of a row. * @@ -945,7 +1011,7 @@ HighsInt Highs_getDualRay(const void* highs, HighsInt* has_dual_ray, * filled with the unboundedness * direction. 
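The comment above does not spell out the layout of `coefficients` for `Highs_passLinearObjectives`: the implementation in highs_c_api.cpp reads it as a dense `num_linear_objective` x `num_col` array, one objective after another, and the abs/rel tolerances bound how far each higher-priority objective may be degraded during lexicographic optimization. A hedged C-style sketch (also compiles as C); the wrapper name, data values and include path are assumptions, while the API calls are those declared in this header:

```cpp
#include "interfaces/highs_c_api.h"  // include path is an assumption; adjust to your build

// Sketch only: assumes `highs` was created by Highs_create() and already
// holds a model with two columns.
void pass_two_objectives(void* highs) {
  const HighsInt num_obj = 2;
  const double weight[2] = {1.0, 1.0};
  const double offset[2] = {0.0, 0.0};
  // Dense num_obj x num_col array: coefficients of objective 0, then objective 1
  const double coefficients[4] = {1.0, 2.0,
                                  3.0, -1.0};
  const double abs_tolerance[2] = {0.0, 0.0};
  const double rel_tolerance[2] = {0.0, 0.0};
  const HighsInt priority[2] = {10, 5};
  Highs_passLinearObjectives(highs, num_obj, weight, offset, coefficients,
                             abs_tolerance, rel_tolerance, priority);
  Highs_run(highs);
}
```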
*/ -HighsInt getDualUnboundednessDirection( +HighsInt Highs_getDualUnboundednessDirection( const void* highs, HighsInt* has_dual_unboundedness_direction, double* dual_unboundedness_direction_value); diff --git a/src/lp_data/HStruct.h b/src/lp_data/HStruct.h index e54657d406..35a0b0ee42 100644 --- a/src/lp_data/HStruct.h +++ b/src/lp_data/HStruct.h @@ -144,4 +144,28 @@ struct HighsIllConditioning { void clear(); }; +struct HighsLinearObjective { + double weight; + double offset; + std::vector coefficients; + double abs_tolerance; + double rel_tolerance; + HighsInt priority; + void clear(); +}; + +struct HighsSimplexStats { + bool valid; + HighsInt iteration_count; + HighsInt num_invert; + HighsInt last_invert_num_el; + HighsInt last_factored_basis_num_el; + double col_aq_density; + double row_ep_density; + double row_ap_density; + double row_DSE_density; + void report(FILE* file, const std::string message = "") const; + void initialise(const HighsInt iteration_count_ = 0); +}; + #endif /* LP_DATA_HSTRUCT_H_ */ diff --git a/src/lp_data/Highs.cpp b/src/lp_data/Highs.cpp index c1c32546a6..6352a68a09 100644 --- a/src/lp_data/Highs.cpp +++ b/src/lp_data/Highs.cpp @@ -59,6 +59,7 @@ HighsStatus Highs::clear() { HighsStatus Highs::clearModel() { model_.clear(); + multi_linear_objective_.clear(); return clearSolver(); } @@ -577,6 +578,36 @@ HighsStatus Highs::passHessian(const HighsInt dim, const HighsInt num_nz, return passHessian(hessian); } +HighsStatus Highs::passLinearObjectives( + const HighsInt num_linear_objective, + const HighsLinearObjective* linear_objective) { + if (num_linear_objective < 0) return HighsStatus::kOk; + this->multi_linear_objective_.clear(); + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) + if (this->addLinearObjective(linear_objective[iObj], iObj) != + HighsStatus::kOk) + return HighsStatus::kError; + return HighsStatus::kOk; +} + +HighsStatus Highs::addLinearObjective( + const HighsLinearObjective& linear_objective, const HighsInt iObj) { + if (model_.isQp()) { + highsLogUser(options_.log_options, HighsLogType::kError, + "Cannot define additional linear objective for QP\n"); + return HighsStatus::kError; + } + if (!this->validLinearObjective(linear_objective, iObj)) + return HighsStatus::kError; + this->multi_linear_objective_.push_back(linear_objective); + return HighsStatus::kOk; +} + +HighsStatus Highs::clearLinearObjectives() { + this->multi_linear_objective_.clear(); + return HighsStatus::kOk; +} + HighsStatus Highs::passColName(const HighsInt col, const std::string& name) { const HighsInt num_col = this->model_.lp_.num_col_; if (col < 0 || col >= num_col) { @@ -705,9 +736,32 @@ HighsStatus Highs::writePresolvedModel(const std::string& filename) { HighsStatus Highs::writeLocalModel(HighsModel& model, const std::string& filename) { HighsStatus return_status = HighsStatus::kOk; + HighsStatus call_status; + + HighsLp& lp = model.lp_; + // Dimensions in a_matrix_ may not be set, so take them from lp + lp.setMatrixDimensions(); // Ensure that the LP is column-wise - model.lp_.ensureColwise(); + lp.ensureColwise(); + + // Ensure that the dimensions are OK + if (!lpDimensionsOk("writeLocalModel", lp, options_.log_options)) + return HighsStatus::kError; + + if (model.hessian_.dim_ > 0) { + call_status = assessHessianDimensions(options_, model.hessian_); + if (call_status == HighsStatus::kError) return call_status; + } + + // Check that the matrix starts are OK + call_status = lp.a_matrix_.assessStart(options_.log_options); + if (call_status == 
HighsStatus::kError) return call_status; + + // Check that the matrix indices are within bounds + call_status = lp.a_matrix_.assessIndexBounds(options_.log_options); + if (call_status == HighsStatus::kError) return call_status; + // Check for repeated column or row names that would corrupt the file if (model.lp_.col_hash_.hasDuplicate(model.lp_.col_names_)) { highsLogUser(options_.log_options, HighsLogType::kError, @@ -853,9 +907,15 @@ HighsStatus Highs::presolve() { return returnFromHighs(return_status); } +HighsStatus Highs::run() { + HighsInt num_linear_objective = this->multi_linear_objective_.size(); + if (num_linear_objective == 0) return this->solve(); + return this->multiobjectiveSolve(); +} + // Checks the options calls presolve and postsolve if needed. Solvers are called // with callSolveLp(..) -HighsStatus Highs::run() { +HighsStatus Highs::solve() { HighsInt min_highs_debug_level = kHighsDebugLevelMin; // kHighsDebugLevelCostly; // kHighsDebugLevelMax; diff --git a/src/lp_data/HighsInfo.cpp b/src/lp_data/HighsInfo.cpp index ce01a585b8..2792f64550 100644 --- a/src/lp_data/HighsInfo.cpp +++ b/src/lp_data/HighsInfo.cpp @@ -292,7 +292,7 @@ void reportInfo(FILE* file, const InfoRecordInt64& info, fprintf(file, "\n# %s\n# [type: int64_t]\n%s = %" PRId64 "\n", info.description.c_str(), info.name.c_str(), *info.value); } else { - fprintf(file, "%s = %" PRId64 "\n", info.name.c_str(), *info.value); + fprintf(file, "%-30s = %" PRId64 "\n", info.name.c_str(), *info.value); } } @@ -306,7 +306,7 @@ void reportInfo(FILE* file, const InfoRecordInt& info, fprintf(file, "\n# %s\n# [type: HighsInt]\n%s = %" HIGHSINT_FORMAT "\n", info.description.c_str(), info.name.c_str(), *info.value); } else { - fprintf(file, "%s = %" HIGHSINT_FORMAT "\n", info.name.c_str(), + fprintf(file, "%-30s = %" HIGHSINT_FORMAT "\n", info.name.c_str(), *info.value); } } @@ -321,6 +321,6 @@ void reportInfo(FILE* file, const InfoRecordDouble& info, fprintf(file, "\n# %s\n# [type: double]\n%s = %g\n", info.description.c_str(), info.name.c_str(), *info.value); } else { - fprintf(file, "%s = %g\n", info.name.c_str(), *info.value); + fprintf(file, "%-30s = %g\n", info.name.c_str(), *info.value); } } diff --git a/src/lp_data/HighsInterface.cpp b/src/lp_data/HighsInterface.cpp index a40813d1fe..1c7ad16b9f 100644 --- a/src/lp_data/HighsInterface.cpp +++ b/src/lp_data/HighsInterface.cpp @@ -1755,7 +1755,6 @@ HighsStatus Highs::getDualRayInterface(bool& has_dual_ray, this->setOptionValue("presolve", kHighsOffString); this->setOptionValue("solve_relaxation", true); HighsStatus call_status = this->run(); - this->writeSolution("", kSolutionStylePretty); if (call_status != HighsStatus::kOk) return_status = call_status; has_dual_ray = ekk_instance_.status_.has_dual_ray; has_invert = ekk_instance_.status_.has_invert; @@ -3584,3 +3583,356 @@ bool Highs::infeasibleBoundsOk() { int(num_true_infeasible_bound)); return num_true_infeasible_bound == 0; } + +bool Highs::validLinearObjective(const HighsLinearObjective& linear_objective, + const HighsInt iObj) const { + HighsInt linear_objective_coefficients_size = + linear_objective.coefficients.size(); + if (linear_objective_coefficients_size != this->model_.lp_.num_col_) { + highsLogUser( + options_.log_options, HighsLogType::kError, + "Coefficient vector for linear objective %s has size %d != %d = " + "lp.num_col_\n", + iObj >= 0 ? 
std::to_string(iObj).c_str() : "", + int(linear_objective_coefficients_size), + int(this->model_.lp_.num_col_)); + return false; + } + if (!options_.blend_multi_objectives && + hasRepeatedLinearObjectivePriorities(&linear_objective)) { + highsLogUser( + options_.log_options, HighsLogType::kError, + "Repeated priorities for lexicographic optimization is illegal\n"); + return false; + } + return true; +} + +bool Highs::hasRepeatedLinearObjectivePriorities( + const HighsLinearObjective* linear_objective) const { + // Look for repeated values in the linear objective priorities, also + // comparing linear_objective if it's not a null pointer. Cost is + // O(n^2), but who will have more than O(1) linear objectives! + HighsInt num_linear_objective = this->multi_linear_objective_.size(); + if (num_linear_objective <= 0 || + num_linear_objective <= 1 && !linear_objective) + return false; + for (HighsInt iObj0 = 0; iObj0 < num_linear_objective; iObj0++) { + HighsInt priority0 = this->multi_linear_objective_[iObj0].priority; + for (HighsInt iObj1 = iObj0 + 1; iObj1 < num_linear_objective; iObj1++) { + HighsInt priority1 = this->multi_linear_objective_[iObj1].priority; + if (priority1 == priority0) return true; + } + if (linear_objective) { + if (linear_objective->priority == priority0) return true; + } + } + return false; +} + +bool comparison(std::pair x1, + std::pair x2) { + return x1.first >= x2.first; +} + +HighsStatus Highs::returnFromLexicographicOptimization( + HighsStatus return_status, HighsInt original_lp_num_row) { + // Save model_status_ and info_ since they are cleared by calling + // deleteRows + HighsModelStatus model_status = this->model_status_; + HighsInfo info = this->info_; + HighsInt num_linear_objective = this->multi_linear_objective_.size(); + if (num_linear_objective > 1) { + this->deleteRows(original_lp_num_row, this->model_.lp_.num_row_ - 1); + // Recover model_status_ and info_, and then account for lack of basis or + // dual solution + this->model_status_ = model_status; + this->info_ = info; + info_.objective_function_value = 0; + info_.basis_validity = kBasisValidityInvalid; + info_.dual_solution_status = kSolutionStatusNone; + info_.num_dual_infeasibilities = kHighsIllegalInfeasibilityCount; + info_.max_dual_infeasibility = kHighsIllegalInfeasibilityMeasure; + info_.sum_dual_infeasibilities = kHighsIllegalInfeasibilityMeasure; + info_.max_complementarity_violation = kHighsIllegalComplementarityViolation; + info_.sum_complementarity_violations = + kHighsIllegalComplementarityViolation; + this->solution_.value_valid = true; + this->model_.lp_.col_cost_.assign(this->model_.lp_.num_col_, 0); + } + return return_status; +} + +HighsStatus Highs::multiobjectiveSolve() { + const HighsInt coeff_logging_size_limit = 10; + HighsInt num_linear_objective = this->multi_linear_objective_.size(); + + assert(num_linear_objective > 0); + HighsLp& lp = this->model_.lp_; + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) { + HighsLinearObjective& multi_linear_objective = + this->multi_linear_objective_[iObj]; + if (multi_linear_objective.coefficients.size() != + static_cast(lp.num_col_)) { + highsLogUser(options_.log_options, HighsLogType::kError, + "Multiple linear objective coefficient vector %d has size " + "incompatible with model\n", + int(iObj)); + return HighsStatus::kError; + } + } + + std::unique_ptr multi_objective_log; + highsLogUser(options_.log_options, HighsLogType::kInfo, + "Solving with %d multiple linear objectives, %s\n", + int(num_linear_objective), + 
this->options_.blend_multi_objectives + ? "blending objectives by weight" + : "using lexicographic optimization by priority"); + highsLogUser( + options_.log_options, HighsLogType::kInfo, + "Ix weight offset abs_tol rel_tol priority%s\n", + lp.num_col_ < coeff_logging_size_limit ? " coefficients" : ""); + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) { + HighsLinearObjective& linear_objective = + this->multi_linear_objective_[iObj]; + multi_objective_log = + std::unique_ptr(new std::stringstream()); + *multi_objective_log << highsFormatToString( + "%2d %11.6g %11.6g %11.6g %11.6g %11d ", int(iObj), + linear_objective.weight, linear_objective.offset, + linear_objective.abs_tolerance, linear_objective.rel_tolerance, + linear_objective.priority); + if (lp.num_col_ < coeff_logging_size_limit) { + for (HighsInt iCol = 0; iCol < lp.num_col_; iCol++) + *multi_objective_log << highsFormatToString( + "%s c_{%1d} = %g", iCol == 0 ? "" : ",", int(iCol), + linear_objective.coefficients[iCol]); + } + *multi_objective_log << "\n"; + highsLogUser(options_.log_options, HighsLogType::kInfo, "%s", + multi_objective_log->str().c_str()); + } + this->clearSolver(); + if (this->options_.blend_multi_objectives) { + // Objectives are blended by weight and minimized + lp.offset_ = 0; + lp.col_cost_.assign(lp.num_col_, 0); + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) { + HighsLinearObjective& multi_linear_objective = + this->multi_linear_objective_[iObj]; + lp.offset_ += + multi_linear_objective.weight * multi_linear_objective.offset; + for (HighsInt iCol = 0; iCol < lp.num_col_; iCol++) + lp.col_cost_[iCol] += multi_linear_objective.weight * + multi_linear_objective.coefficients[iCol]; + } + lp.sense_ = ObjSense::kMinimize; + + multi_objective_log = + std::unique_ptr(new std::stringstream()); + *multi_objective_log << highsFormatToString( + "Solving with blended objective"); + if (lp.num_col_ < coeff_logging_size_limit) { + *multi_objective_log << highsFormatToString( + ": %s %g", lp.sense_ == ObjSense::kMinimize ? 
"min" : "max", + lp.offset_); + for (HighsInt iCol = 0; iCol < lp.num_col_; iCol++) + *multi_objective_log << highsFormatToString( + " + (%g) x[%d]", lp.col_cost_[iCol], int(iCol)); + } + *multi_objective_log << "\n"; + highsLogUser(options_.log_options, HighsLogType::kInfo, "%s", + multi_objective_log->str().c_str()); + return this->solve(); + } + + // Objectives are applied lexicographically + if (model_.isQp() && num_linear_objective > 1) { + // Lexicographic optimization with a single linear objective is + // trivially standard optimization, so is OK + highsLogUser( + options_.log_options, HighsLogType::kError, + "Cannot perform non-trivial lexicographic optimization for QP\n"); + return HighsStatus::kError; + } + // Check whether there are repeated linear objective priorities + if (hasRepeatedLinearObjectivePriorities()) { + highsLogUser( + options_.log_options, HighsLogType::kError, + "Repeated priorities for lexicographic optimization is illegal\n"); + return HighsStatus::kError; + } + std::vector> priority_objective; + + for (HighsInt iObj = 0; iObj < num_linear_objective; iObj++) + priority_objective.push_back( + std::make_pair(this->multi_linear_objective_[iObj].priority, iObj)); + std::sort(priority_objective.begin(), priority_objective.end(), comparison); + // Clear LP objective + lp.offset_ = 0; + lp.col_cost_.assign(lp.num_col_, 0); + const HighsInt original_lp_num_row = lp.num_row_; + std::vector index(lp.num_col_); + std::vector value(lp.num_col_); + // Use the solution of one MIP to provide an integer feasible + // solution of the next + HighsSolution solution; + for (HighsInt iIx = 0; iIx < num_linear_objective; iIx++) { + HighsInt priority = priority_objective[iIx].first; + HighsInt iObj = priority_objective[iIx].second; + // Use this objective + HighsLinearObjective& linear_objective = + this->multi_linear_objective_[iObj]; + lp.offset_ = linear_objective.offset; + lp.col_cost_ = linear_objective.coefficients; + lp.sense_ = + linear_objective.weight > 0 ? ObjSense::kMinimize : ObjSense::kMaximize; + if (lp.isMip() && solution.value_valid) { + HighsStatus set_solution_status = this->setSolution(solution); + if (set_solution_status == HighsStatus::kError) { + highsLogUser(options_.log_options, HighsLogType::kError, + "Failure to use one MIP to provide an integer feasible " + "solution of the next\n"); + return returnFromLexicographicOptimization(HighsStatus::kError, + original_lp_num_row); + } + bool valid, integral, feasible; + HighsStatus assess_primal_solution = + assessPrimalSolution(valid, integral, feasible); + if (!valid || !integral || !feasible) { + highsLogUser(options_.log_options, HighsLogType::kWarning, + "Failure to use one MIP to provide an integer feasible " + "solution of the next: " + "status is valid = %s, integral = %s, feasible = %s\n", + highsBoolToString(valid).c_str(), + highsBoolToString(integral).c_str(), + highsBoolToString(feasible).c_str()); + } + } + multi_objective_log = + std::unique_ptr(new std::stringstream()); + *multi_objective_log << highsFormatToString("Solving with objective %d", + int(iObj)); + if (lp.num_col_ < coeff_logging_size_limit) { + *multi_objective_log << highsFormatToString( + ": %s %g", lp.sense_ == ObjSense::kMinimize ? 
"min" : "max", + lp.offset_); + for (HighsInt iCol = 0; iCol < lp.num_col_; iCol++) + *multi_objective_log << highsFormatToString( + " + (%g) x[%d]", lp.col_cost_[iCol], int(iCol)); + } + *multi_objective_log << "\n"; + highsLogUser(options_.log_options, HighsLogType::kInfo, "%s", + multi_objective_log->str().c_str()); + HighsStatus solve_status = this->solve(); + if (solve_status == HighsStatus::kError) + return returnFromLexicographicOptimization(HighsStatus::kError, + original_lp_num_row); + if (model_status_ != HighsModelStatus::kOptimal) { + highsLogUser(options_.log_options, HighsLogType::kWarning, + "After priority %d solve, model status is %s\n", + int(priority), modelStatusToString(model_status_).c_str()); + return returnFromLexicographicOptimization(HighsStatus::kWarning, + original_lp_num_row); + } + if (iIx == num_linear_objective - 1) break; + if (lp.isMip()) { + // Save the solution to provide an integer feasible solution of + // the next MIP + solution.col_value = this->solution_.col_value; + solution.value_valid = true; + } + // Add the constraint + HighsInt nnz = 0; + for (HighsInt iCol = 0; iCol < lp.num_col_; iCol++) { + if (lp.col_cost_[iCol]) { + index[nnz] = iCol; + value[nnz] = lp.col_cost_[iCol]; + nnz++; + } + } + double objective = info_.objective_function_value; + HighsStatus add_row_status; + double lower_bound = -kHighsInf; + double upper_bound = kHighsInf; + if (lp.sense_ == ObjSense::kMinimize) { + // Minimizing, so set a greater upper bound than the objective + if (linear_objective.abs_tolerance >= 0) + upper_bound = objective + linear_objective.abs_tolerance; + if (linear_objective.rel_tolerance >= 0) { + if (objective >= 0) { + // Guarantees objective of at least (1+t).f^* + // + // so ((1+t).f^*-f^*)/f^* = t + upper_bound = std::min( + objective * (1.0 + linear_objective.rel_tolerance), upper_bound); + } else if (objective < 0) { + // Guarantees objective of at least (1-t).f^* + // + // so ((1-t).f^*-f^*)/f^* = -t + upper_bound = std::min( + objective * (1.0 - linear_objective.rel_tolerance), upper_bound); + } + } + upper_bound -= lp.offset_; + } else { + // Maximizing, so set a lesser lower bound than the objective + if (linear_objective.abs_tolerance >= 0) + lower_bound = objective - linear_objective.abs_tolerance; + if (linear_objective.rel_tolerance >= 0) { + if (objective >= 0) { + // Guarantees objective of at most (1-t).f^* + // + // so ((1-t).f^*-f^*)/f^* = -t + lower_bound = std::max( + objective * (1.0 - linear_objective.rel_tolerance), lower_bound); + } else if (objective < 0) { + // Guarantees objective of at least (1+t).f^* + // + // so ((1+t).f^*-f^*)/f^* = t + lower_bound = std::max( + objective * (1.0 + linear_objective.rel_tolerance), lower_bound); + } + } + lower_bound -= lp.offset_; + } + if (lower_bound == -kHighsInf && upper_bound == kHighsInf) + highsLogUser(options_.log_options, HighsLogType::kWarning, + "After priority %d solve, no objective constraint due to " + "absolute tolerance being %g < 0," + " and relative tolerance being %g < 0\n", + int(priority), linear_objective.abs_tolerance, + linear_objective.rel_tolerance); + multi_objective_log = + std::unique_ptr(new std::stringstream()); + *multi_objective_log << highsFormatToString( + "Add constraint for objective %d: ", int(iObj)); + if (nnz < coeff_logging_size_limit) { + *multi_objective_log << highsFormatToString("%g <= ", lower_bound); + for (HighsInt iEl = 0; iEl < nnz; iEl++) + *multi_objective_log << highsFormatToString( + "%s(%g) x[%d]", iEl > 0 ? 
" + " : "", value[iEl], int(index[iEl])); + *multi_objective_log << highsFormatToString(" <= %g\n", upper_bound); + } else { + *multi_objective_log << highsFormatToString("Bounds [%g, %g]\n", + lower_bound, upper_bound); + } + highsLogUser(options_.log_options, HighsLogType::kInfo, "%s", + multi_objective_log->str().c_str()); + add_row_status = + this->addRow(lower_bound, upper_bound, nnz, index.data(), value.data()); + assert(add_row_status == HighsStatus::kOk); + } + return returnFromLexicographicOptimization(HighsStatus::kOk, + original_lp_num_row); +} + +void HighsLinearObjective::clear() { + this->weight = 0.0; + this->offset = 0.0; + this->coefficients.clear(); + this->abs_tolerance = 0.0; + this->rel_tolerance = 0.0; + this->priority = 0; +} diff --git a/src/lp_data/HighsLpUtils.cpp b/src/lp_data/HighsLpUtils.cpp index 89b31e04aa..af8c39c822 100644 --- a/src/lp_data/HighsLpUtils.cpp +++ b/src/lp_data/HighsLpUtils.cpp @@ -130,7 +130,7 @@ bool lpDimensionsOk(std::string message, const HighsLp& lp, HighsInt col_upper_size = lp.col_upper_.size(); bool legal_col_cost_size = col_cost_size >= num_col; bool legal_col_lower_size = col_lower_size >= num_col; - bool legal_col_upper_size = col_lower_size >= num_col; + bool legal_col_upper_size = col_upper_size >= num_col; if (!legal_col_cost_size) highsLogUser(log_options, HighsLogType::kError, "LP dimension validation (%s) fails on col_cost.size() = %d < " diff --git a/src/lp_data/HighsOptions.h b/src/lp_data/HighsOptions.h index cfb395a3fc..82424a0adc 100644 --- a/src/lp_data/HighsOptions.h +++ b/src/lp_data/HighsOptions.h @@ -348,6 +348,9 @@ struct HighsOptionsStruct { // Options for IIS calculation HighsInt iis_strategy; + // Option for multi-objective optimization + bool blend_multi_objectives; + // Advanced options HighsInt log_dev_level; bool log_githash; @@ -485,6 +488,8 @@ struct HighsOptionsStruct { pdlp_d_gap_tol(0.0), qp_iteration_limit(0), qp_nullspace_limit(0), + iis_strategy(0), + blend_multi_objectives(false), log_dev_level(0), log_githash(false), solve_relaxation(false), @@ -1120,6 +1125,12 @@ class HighsOptions : public HighsOptionsStruct { kIisStrategyMax); records.push_back(record_int); + record_bool = new OptionRecordBool( + "blend_multi_objectives", + "Blend multiple objectives or apply lexicographically: Default = true", + advanced, &blend_multi_objectives, true); + records.push_back(record_bool); + // Fix the number of user settable options num_user_settable_options_ = static_cast(records.size()); diff --git a/src/lp_data/HighsSolve.cpp b/src/lp_data/HighsSolve.cpp index f1df5239a3..118b92c54a 100644 --- a/src/lp_data/HighsSolve.cpp +++ b/src/lp_data/HighsSolve.cpp @@ -17,7 +17,7 @@ #include "pdlp/CupdlpWrapper.h" #include "simplex/HApp.h" -// The method below runs simplex or ipx solver on the lp. +// The method below runs simplex, ipx or pdlp solver on the lp. 
HighsStatus solveLp(HighsLpSolverObject& solver_object, const string message) { HighsStatus return_status = HighsStatus::kOk; HighsStatus call_status; diff --git a/src/mip/HighsCutGeneration.cpp b/src/mip/HighsCutGeneration.cpp index 1417c777b7..be210324a5 100644 --- a/src/mip/HighsCutGeneration.cpp +++ b/src/mip/HighsCutGeneration.cpp @@ -523,12 +523,7 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, if (isintegral[i]) { integerinds.push_back(i); - if (upper[i] < 2 * solval[i]) { - complementation[i] = 1 - complementation[i]; - rhs -= upper[i] * vals[i]; - vals[i] = -vals[i]; - solval[i] = upper[i] - solval[i]; - } + if (upper[i] < 2 * solval[i]) flipComplementation(i); if (onlyInitialCMIRScale) continue; @@ -539,11 +534,8 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, deltas.push_back(delta); } } else { - continuouscontribution += vals[i] * solval[i]; - - if (vals[i] > 0 && solval[i] <= feastol) continue; - if (vals[i] < 0 && solval[i] >= upper[i] - feastol) continue; - continuoussqrnorm += vals[i] * vals[i]; + updateViolationAndNorm(i, vals[i], continuouscontribution, + continuoussqrnorm); } } @@ -596,12 +588,7 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, double downaj = fast_floor(scalaj + kHighsTiny); double fj = scalaj - downaj; double aj = downaj + std::max(0.0, fj - f0); - - viol += aj * solval[j]; - - if (aj > 0 && solval[j] <= feastol) continue; - if (aj < 0 && solval[j] >= upper[j] - feastol) continue; - sqrnorm += aj * aj; + updateViolationAndNorm(j, aj, viol, sqrnorm); } double efficacy = viol / sqrt(sqrnorm); @@ -633,12 +620,7 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, double downaj = fast_floor(scalaj + kHighsTiny); double fj = scalaj - downaj; double aj = downaj + std::max(0.0, fj - f0); - - viol += aj * solval[j]; - - if (aj > 0 && solval[j] <= feastol) continue; - if (aj < 0 && solval[j] >= upper[j] - feastol) continue; - sqrnorm += aj * aj; + updateViolationAndNorm(j, aj, viol, sqrnorm); } double efficacy = viol / sqrt(sqrnorm); @@ -655,10 +637,7 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, if (upper[k] == kHighsInf) continue; if (solval[k] <= feastol) continue; - complementation[k] = 1 - complementation[k]; - solval[k] = upper[k] - solval[k]; - rhs -= upper[k] * vals[k]; - vals[k] = -vals[k]; + flipComplementation(k); double delta = bestdelta; double scale = 1.0 / delta; @@ -667,20 +646,13 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, double f0 = scalrhs - downrhs; if (f0 < f0min || f0 > f0max) { - complementation[k] = 1 - complementation[k]; - solval[k] = upper[k] - solval[k]; - rhs -= upper[k] * vals[k]; - vals[k] = -vals[k]; - + flipComplementation(k); continue; } double oneoveroneminusf0 = 1.0 / (1.0 - f0); if (oneoveroneminusf0 > maxCMirScale) { - complementation[k] = 1 - complementation[k]; - solval[k] = upper[k] - solval[k]; - rhs -= upper[k] * vals[k]; - vals[k] = -vals[k]; + flipComplementation(k); continue; } @@ -692,22 +664,14 @@ bool HighsCutGeneration::cmirCutGenerationHeuristic(double minEfficacy, double downaj = fast_floor(scalaj + kHighsTiny); double fj = scalaj - downaj; double aj = downaj + std::max(0.0, fj - f0); - - viol += aj * solval[j]; - - if (aj > 0 && solval[j] <= feastol) continue; - if (aj < 0 && solval[j] >= upper[j] - feastol) continue; - sqrnorm += aj * aj; + updateViolationAndNorm(j, aj, viol, sqrnorm); } double efficacy = viol / sqrt(sqrnorm); if (efficacy > 
bestefficacy) { bestefficacy = efficacy; } else { - complementation[k] = 1 - complementation[k]; - solval[k] = upper[k] - solval[k]; - rhs -= upper[k] * vals[k]; - vals[k] = -vals[k]; + flipComplementation(k); } } @@ -941,12 +905,12 @@ bool HighsCutGeneration::preprocessBaseInequality(bool& hasUnboundedInts, lpRelaxation.isColIntegral(inds[i]) && std::abs(vals[i]) > 10 * feastol; if (!isintegral[i]) { - if (upper[i] - solval[i] < solval[i]) { + // complement non-integer variable (cmir separation heuristic complements + // integral variables in the same way) + if (upper[i] < 2 * solval[i]) { if (complementation.empty()) complementation.resize(rowlen); - complementation[i] = 1 - complementation[i]; - rhs -= upper[i] * vals[i]; - vals[i] = -vals[i]; + flipComplementation(i); } // relax positive continuous variables and those with small contributions @@ -1144,115 +1108,17 @@ bool HighsCutGeneration::generateCut(HighsTransformedLp& transLp, for (HighsInt i = 0; i != rowlen; ++i) { if (vals[i] > 0 || !isintegral[i]) continue; - complementation[i] = 1 - complementation[i]; - rhs -= upper[i] * vals[i]; - vals[i] = -vals[i]; - solval[i] = upper[i] - solval[i]; + flipComplementation(i); } } - const double minEfficacy = 10 * feastol; - - if (hasUnboundedInts) { - if (!cmirCutGenerationHeuristic(minEfficacy, onlyInitialCMIRScale)) - return false; - } else { - // 1. Determine a cover, cover does not need to be minimal as neither of - // the - // lifting functions have minimality of the cover as necessary facet - // condition - std::vector tmpVals(vals, vals + rowlen); - std::vector tmpInds(inds, inds + rowlen); - HighsCDouble tmpRhs = rhs; - bool success = false; - do { - if (!determineCover()) break; - - // 2. use superadditive lifting function depending on structure of base - // inequality: - // We have 3 lifting functions available for pure binary knapsack sets, - // for mixed-binary knapsack sets and for mixed integer knapsack sets. 
- if (!hasContinuous && !hasGeneralInts) { - separateLiftedKnapsackCover(); - success = true; - } else if (hasGeneralInts) { - success = separateLiftedMixedIntegerCover(); - } else { - assert(hasContinuous); - assert(!hasGeneralInts); - success = separateLiftedMixedBinaryCover(); - } - } while (false); - - double minMirEfficacy = minEfficacy; - if (success) { - double violation = -double(rhs); - double sqrnorm = 0.0; - - for (HighsInt i = 0; i < rowlen; ++i) { - violation += vals[i] * solval[i]; - - if (vals[i] > 0 && solval[i] <= feastol) continue; - if (vals[i] < 0 && solval[i] >= upper[i] - feastol) continue; - sqrnorm += vals[i] * vals[i]; - } - - double efficacy = violation / std::sqrt(sqrnorm); - if (efficacy <= minEfficacy) { - success = false; - rhs = tmpRhs; - } else { - minMirEfficacy += efficacy; - if (!complementation.empty()) { - // remove the complementation if it exists, so that the values stored - // values are uncomplemented - for (HighsInt i = 0; i != rowlen; ++i) { - if (complementation[i]) { - rhs -= upper[i] * vals[i]; - vals[i] = -vals[i]; - solval[i] = upper[i] - solval[i]; - } - } - } - std::swap(tmpRhs, rhs); - } - } - - inds = tmpInds.data(); - vals = tmpVals.data(); - - bool cmirSuccess = - cmirCutGenerationHeuristic(minMirEfficacy, onlyInitialCMIRScale); - - if (cmirSuccess) { - // take the cmir cut as it is better - inds_.swap(tmpInds); - vals_.swap(tmpVals); - inds = inds_.data(); - vals = vals_.data(); - } else if (success) { - // take the previous lifted cut as cmir could not improve - // as we already removed the complementation we simply clear - // the vector if altered by the cmir routine and restore the old - // right hand side and values - rhs = tmpRhs; - complementation.clear(); - inds = inds_.data(); - vals = vals_.data(); - } else - // neither cmir nor lifted cut successful - return false; - } + // try to generate a cut + if (!tryGenerateCut(inds_, vals_, hasUnboundedInts, hasGeneralInts, + hasContinuous, 10 * feastol, onlyInitialCMIRScale)) + return false; - if (!complementation.empty()) { - // remove the complementation if exists - for (HighsInt i = 0; i != rowlen; ++i) { - if (complementation[i]) { - rhs -= upper[i] * vals[i]; - vals[i] = -vals[i]; - } - } - } + // remove the complementation if exists + removeComplementation(); // remove zeros in place for (HighsInt i = rowlen - 1; i >= 0; --i) { @@ -1364,77 +1230,10 @@ bool HighsCutGeneration::generateConflict(HighsDomain& localdomain, hasContinuous)) return false; - if (hasUnboundedInts) { - if (!cmirCutGenerationHeuristic(feastol)) return false; - } else { - // 1. Determine a cover, cover does not need to be minimal as neither of - // the - // lifting functions have minimality of the cover as necessary facet - // condition - std::vector tmpVals(vals, vals + rowlen); - std::vector tmpInds(inds, inds + rowlen); - std::vector tmpComplementation(complementation); - HighsCDouble tmpRhs = rhs; - bool success = false; - do { - if (!determineCover(false)) break; - - // 2. use superadditive lifting function depending on structure of base - // inequality: - // We have 3 lifting functions available for pure binary knapsack sets, - // for mixed-binary knapsack sets and for mixed integer knapsack sets. 
- if (!hasContinuous && !hasGeneralInts) { - separateLiftedKnapsackCover(); - success = true; - } else if (hasGeneralInts) { - success = separateLiftedMixedIntegerCover(); - } else { - assert(hasContinuous); - assert(!hasGeneralInts); - success = separateLiftedMixedBinaryCover(); - } - } while (false); - - double minEfficacy = feastol; - if (success) { - double violation = -double(rhs); - double sqrnorm = 0.0; - - for (HighsInt i = 0; i < rowlen; ++i) { - violation += vals[i] * solval[i]; - - if (vals[i] > 0 && solval[i] <= feastol) continue; - if (vals[i] < 0 && solval[i] >= upper[i] - feastol) continue; - sqrnorm += vals[i] * vals[i]; - } - - minEfficacy = violation / std::sqrt(sqrnorm); - minEfficacy += feastol; - std::swap(tmpRhs, rhs); - } - - inds = tmpInds.data(); - vals = tmpVals.data(); - - bool cmirSuccess = cmirCutGenerationHeuristic(minEfficacy); - - if (cmirSuccess) { - // take the cmir cut as it is better - proofinds.swap(tmpInds); - proofvals.swap(tmpVals); - inds = proofinds.data(); - vals = proofvals.data(); - } else if (success) { - // take the previous lifted cut as cmir could not improve - // we restore the old complementation vector, right hand side, and values - rhs = tmpRhs; - complementation.swap(tmpComplementation); - inds = proofinds.data(); - vals = proofvals.data(); - } else - // neither cmir nor lifted cut successful - return false; - } + // try to generate a cut + if (!tryGenerateCut(proofinds, proofvals, hasUnboundedInts, hasGeneralInts, + hasContinuous, feastol, false, false, false)) + return false; // remove the complementation if (!complementation.empty()) { @@ -1526,3 +1325,134 @@ bool HighsCutGeneration::finalizeAndAddCut(std::vector& inds_, // of a cut already in the pool return cutindex != -1; } + +void HighsCutGeneration::flipComplementation(HighsInt index) { + // only variables with finite upper bounds can be complemented + assert(upper[index] != kHighsInf); + + // flip complementation + complementation[index] = 1 - complementation[index]; + solval[index] = upper[index] - solval[index]; + rhs -= upper[index] * vals[index]; + vals[index] = -vals[index]; +} + +void HighsCutGeneration::removeComplementation() { + // remove the complementation if it exists + if (complementation.empty()) return; + for (HighsInt i = 0; i != rowlen; ++i) { + if (complementation[i]) flipComplementation(i); + } +} + +void HighsCutGeneration::updateViolationAndNorm(HighsInt index, double aj, + double& violation, + double& norm) const { + // update violation + violation += aj * solval[index]; + + // update norm + if (aj > 0 && solval[index] <= feastol) return; + if (aj < 0 && solval[index] >= upper[index] - feastol) return; + norm += aj * aj; +} + +bool HighsCutGeneration::tryGenerateCut(std::vector& inds_, + std::vector& vals_, + bool hasUnboundedInts, + bool hasGeneralInts, bool hasContinuous, + double minEfficacy, + bool onlyInitialCMIRScale, + bool allowRejectCut, bool lpSol) { + // use cmir if there are unbounded integer variables + if (hasUnboundedInts) + return cmirCutGenerationHeuristic(minEfficacy, onlyInitialCMIRScale); + + // 0. Save data before determining cover and applying lifting functions + std::vector tmpVals(vals, vals + rowlen); + std::vector tmpInds(inds, inds + rowlen); + std::vector tmpComplementation(complementation); + std::vector tmpSolval(solval); + HighsCDouble tmpRhs = rhs; + + // 1. 
Determine a cover, cover does not need to be minimal as neither of + // the lifting functions have minimality of the cover as necessary facet + // condition + bool success = false; + do { + if (!determineCover(lpSol)) break; + + // 2. use superadditive lifting function depending on structure of base + // inequality: + // We have 3 lifting functions available for pure binary knapsack sets, + // for mixed-binary knapsack sets and for mixed integer knapsack sets. + if (!hasContinuous && !hasGeneralInts) { + separateLiftedKnapsackCover(); + success = true; + } else if (hasGeneralInts) { + success = separateLiftedMixedIntegerCover(); + } else { + assert(hasContinuous); + assert(!hasGeneralInts); + success = separateLiftedMixedBinaryCover(); + } + } while (false); + + double minMirEfficacy = minEfficacy; + if (success) { + // compute violation and squared norm + double violation = -double(rhs); + double sqrnorm = 0.0; + for (HighsInt i = 0; i < rowlen; ++i) { + updateViolationAndNorm(i, vals[i], violation, sqrnorm); + } + + // compute efficacy (distance cut off) + double efficacy = violation / std::sqrt(sqrnorm); + if (allowRejectCut && efficacy <= minEfficacy) { + // reject cut + success = false; + rhs = tmpRhs; + } else { + // accept cut and increase minimum efficiency requirement for cmir cut + minMirEfficacy += efficacy; + std::swap(tmpRhs, rhs); + } + } + + // restore indices and values; lifting methods do not modify complementation + // and, thus, complementation-related data does not have to be restored here. + inds = tmpInds.data(); + vals = tmpVals.data(); + + // save data that might otherwise be overwritten when calling the cmir + // separator + bool saveIntegalSupport = integralSupport; + bool saveIntegralCoefficients = integralCoefficients; + + if (cmirCutGenerationHeuristic(minMirEfficacy, onlyInitialCMIRScale)) { + // take the cmir cut as it is better + inds_.swap(tmpInds); + vals_.swap(tmpVals); + inds = inds_.data(); + vals = vals_.data(); + return true; + } else if (success) { + // take the previous lifted cut as cmir could not improve + // we restore the old complementation vector, right hand side, and values + rhs = tmpRhs; + // note that the solution vector solval also needs to be restored because it + // depends on the complementation. it would be OK not to restore solval, if + // there would be a guarantee that it is not used from here on. 
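For intuition on the acceptance test used here and in the cmir heuristic above: with violation = a^T x* - rhs and sqrnorm accumulated by updateViolationAndNorm, the efficacy violation / sqrt(sqrnorm) is essentially the Euclidean distance by which the cut a^T x <= rhs separates the LP point x* (the helper skips norm contributions from variables sitting at the bound where the term cannot reduce the violation, so the value slightly over-estimates the true distance). For example, a cut x1 + x2 <= 1 evaluated at x* = (0.8, 0.8) has violation 0.6 and norm sqrt(2), giving an efficacy of about 0.42; generateCut only accepts cuts whose efficacy exceeds the minEfficacy it passes in (10 * feastol there).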
+ complementation.swap(tmpComplementation); + solval.swap(tmpSolval); + inds = inds_.data(); + vals = vals_.data(); + // restore indicators + integralSupport = saveIntegalSupport; + integralCoefficients = saveIntegralCoefficients; + return true; + } else + // neither cmir nor lifted cut successful + return false; +} diff --git a/src/mip/HighsCutGeneration.h b/src/mip/HighsCutGeneration.h index cc18998c6d..dc45924e6a 100644 --- a/src/mip/HighsCutGeneration.h +++ b/src/mip/HighsCutGeneration.h @@ -73,6 +73,19 @@ class HighsCutGeneration { bool preprocessBaseInequality(bool& hasUnboundedInts, bool& hasGeneralInts, bool& hasContinuous); + void flipComplementation(HighsInt index); + + void removeComplementation(); + + void updateViolationAndNorm(HighsInt index, double aj, double& violation, + double& norm) const; + + bool tryGenerateCut(std::vector& inds, std::vector& vals, + bool hasUnboundedInts, bool hasGeneralInts, + bool hasContinuous, double minEfficacy, + bool onlyInitialCMIRScale = false, + bool allowRejectCut = true, bool lpSol = true); + public: HighsCutGeneration(const HighsLpRelaxation& lpRelaxation, HighsCutPool& cutpool); diff --git a/src/mip/HighsMipSolverData.cpp b/src/mip/HighsMipSolverData.cpp index e84f9b5396..9130f335db 100644 --- a/src/mip/HighsMipSolverData.cpp +++ b/src/mip/HighsMipSolverData.cpp @@ -1866,9 +1866,11 @@ void HighsMipSolverData::evaluateRootNode() { mipsolver.analysis_.mipTimerStop(kMipClockStartSymmetryDetection); } if (compute_analytic_centre && !analyticCenterComputed) { - highsLogUser(mipsolver.options_mip_->log_options, HighsLogType::kInfo, - "MIP-Timing: %11.2g - starting analytic centre calculation\n", - mipsolver.timer_.read()); + if (mipsolver.analysis_.analyse_mip_time) + highsLogUser( + mipsolver.options_mip_->log_options, HighsLogType::kInfo, + "MIP-Timing: %11.2g - starting analytic centre calculation\n", + mipsolver.timer_.read()); mipsolver.analysis_.mipTimerStart(kMipClockStartAnalyticCentreComputation); startAnalyticCenterComputation(tg); mipsolver.analysis_.mipTimerStop(kMipClockStartAnalyticCentreComputation); @@ -2501,7 +2503,8 @@ bool HighsMipSolverData::interruptFromCallbackWithData( return mipsolver.callback_->callbackAction(callback_type, message); } -double possInfRelDiff(const double v0, const double v1, const double den) { +static double possInfRelDiff(const double v0, const double v1, + const double den) { double rel_diff; if (std::fabs(v0) == kHighsInf) { if (std::fabs(v1) == kHighsInf) { diff --git a/src/mip/HighsPathSeparator.cpp b/src/mip/HighsPathSeparator.cpp index 0989bd098c..4bbc0e94f8 100644 --- a/src/mip/HighsPathSeparator.cpp +++ b/src/mip/HighsPathSeparator.cpp @@ -351,16 +351,17 @@ void HighsPathSeparator::separateLpSolution(HighsLpRelaxation& lpRelaxation, if (addedSubstitutionRows) continue; + // generate cut double rhs = 0; - success = cutGen.generateCut(transLp, baseRowInds, baseRowVals, rhs); lpAggregator.getCurrentAggregation(baseRowInds, baseRowVals, true); if (!aggregatedPath.empty() || bestOutArcCol != -1 || bestInArcCol != -1) aggregatedPath.emplace_back(baseRowInds, baseRowVals); - rhs = 0; + // generate reverse cut + rhs = 0; success |= cutGen.generateCut(transLp, baseRowInds, baseRowVals, rhs); if (success || (bestOutArcCol == -1 && bestInArcCol == -1)) break; diff --git a/src/mip/HighsPrimalHeuristics.cpp b/src/mip/HighsPrimalHeuristics.cpp index 5908f4373c..1c4aff5daf 100644 --- a/src/mip/HighsPrimalHeuristics.cpp +++ b/src/mip/HighsPrimalHeuristics.cpp @@ -37,10 +37,10 @@ 
HighsPrimalHeuristics::HighsPrimalHeuristics(HighsMipSolver& mipsolver) : mipsolver(mipsolver), - lp_iterations(0), total_repair_lp(0), total_repair_lp_feasible(0), total_repair_lp_iterations(0), + lp_iterations(0), randgen(mipsolver.options_mip_->random_seed) { successObservations = 0; numSuccessObservations = 0; diff --git a/src/mip/HighsTransformedLp.cpp b/src/mip/HighsTransformedLp.cpp index 16b529a4cb..a11e25c3e9 100644 --- a/src/mip/HighsTransformedLp.cpp +++ b/src/mip/HighsTransformedLp.cpp @@ -132,33 +132,44 @@ bool HighsTransformedLp::transform(std::vector& vals, std::vector& solval, std::vector& inds, double& rhs, bool& integersPositive, bool preferVbds) { + // vector sum should be empty + assert(vectorsum.getNonzeros().empty()); + HighsCDouble tmpRhs = rhs; const HighsMipSolver& mip = lprelaxation.getMipSolver(); const HighsInt slackOffset = lprelaxation.numCols(); HighsInt numNz = inds.size(); - bool removeZeros = false; - for (HighsInt i = 0; i != numNz; ++i) { + auto getLb = [&](HighsInt col) { + return (col < slackOffset ? mip.mipdata_->domain.col_lower_[col] + : lprelaxation.slackLower(col - slackOffset)); + }; + + auto getUb = [&](HighsInt col) { + return (col < slackOffset ? mip.mipdata_->domain.col_upper_[col] + : lprelaxation.slackUpper(col - slackOffset)); + }; + + auto remove = [&](HighsInt position) { + numNz--; + inds[position] = inds[numNz]; + vals[position] = vals[numNz]; + inds[numNz] = 0; + vals[numNz] = 0; + }; + + HighsInt i = 0; + while (i < numNz) { HighsInt col = inds[i]; - double lb; - double ub; - - if (col < slackOffset) { - lb = mip.mipdata_->domain.col_lower_[col]; - ub = mip.mipdata_->domain.col_upper_[col]; - } else { - HighsInt row = col - slackOffset; - lb = lprelaxation.slackLower(row); - ub = lprelaxation.slackUpper(row); - } + double lb = getLb(col); + double ub = getUb(col); if (ub - lb < mip.options_mip_->small_matrix_value) { tmpRhs -= std::min(lb, ub) * vals[i]; - vals[i] = 0.0; - removeZeros = true; + remove(i); continue; } @@ -179,32 +190,31 @@ bool HighsTransformedLp::transform(std::vector& vals, BoundType oldBoundType = boundTypes[col]; if (lprelaxation.isColIntegral(col)) { - if (lb == -kHighsInf || ub == kHighsInf) integersPositive = false; - bool useVbd = false; - if (ub - lb > 1.5 && boundDist[col] == 0.0 && simpleLbDist[col] != 0 && - simpleUbDist[col] != 0) { - if (bestVlb[col].first == -1 || - ubDist[col] < lbDist[col] - mip.mipdata_->feastol) { - assert(bestVub[col].first != -1); - boundTypes[col] = BoundType::kVariableUb; - useVbd = true; - } else if (bestVub[col].first == -1 || - lbDist[col] < ubDist[col] - mip.mipdata_->feastol) { - assert(bestVlb[col].first != -1); - boundTypes[col] = BoundType::kVariableLb; - useVbd = true; - } else if (vals[i] > 0) { - assert(bestVub[col].first != -1); - boundTypes[col] = BoundType::kVariableUb; - useVbd = true; - } else { - assert(bestVlb[col].first != -1); - boundTypes[col] = BoundType::kVariableLb; - useVbd = true; - } + if (ub - lb <= 1.5 || boundDist[col] != 0.0 || simpleLbDist[col] == 0 || + simpleUbDist[col] == 0) { + // since we skip the handling of variable bound constraints for all + // binary and some general-integer variables, the bound type used should + // be a simple lower or upper bound + assert(oldBoundType == BoundType::kSimpleLb || + oldBoundType == BoundType::kSimpleUb); + i++; + continue; + } + if (bestVlb[col].first == -1 || + ubDist[col] < lbDist[col] - mip.mipdata_->feastol) { + assert(bestVub[col].first != -1); + boundTypes[col] = BoundType::kVariableUb; + } else if 
(bestVub[col].first == -1 || + lbDist[col] < ubDist[col] - mip.mipdata_->feastol) { + assert(bestVlb[col].first != -1); + boundTypes[col] = BoundType::kVariableLb; + } else if (vals[i] > 0) { + assert(bestVub[col].first != -1); + boundTypes[col] = BoundType::kVariableUb; + } else { + assert(bestVlb[col].first != -1); + boundTypes[col] = BoundType::kVariableLb; } - - if (!useVbd) continue; } else { if (lbDist[col] < ubDist[col] - mip.mipdata_->feastol) { if (bestVlb[col].first == -1) @@ -242,18 +252,20 @@ bool HighsTransformedLp::transform(std::vector& vals, switch (boundTypes[col]) { case BoundType::kSimpleLb: if (vals[i] > 0) { + // relax away using lower bound tmpRhs -= lb * vals[i]; - vals[i] = 0.0; - removeZeros = true; boundTypes[col] = oldBoundType; + remove(i); + continue; } break; case BoundType::kSimpleUb: if (vals[i] < 0) { + // relax away using upper bound tmpRhs -= ub * vals[i]; - vals[i] = 0.0; - removeZeros = true; boundTypes[col] = oldBoundType; + remove(i); + continue; } break; case BoundType::kVariableLb: @@ -261,7 +273,8 @@ bool HighsTransformedLp::transform(std::vector& vals, vectorsum.add(bestVlb[col].first, vals[i] * bestVlb[col].second.coef); if (vals[i] > 0) { boundTypes[col] = oldBoundType; - vals[i] = 0; + remove(i); + continue; } break; case BoundType::kVariableUb: @@ -270,9 +283,12 @@ bool HighsTransformedLp::transform(std::vector& vals, vals[i] = -vals[i]; if (vals[i] > 0) { boundTypes[col] = oldBoundType; - vals[i] = 0; + remove(i); + continue; } } + // move to next element + i++; } if (!vectorsum.getNonzeros().empty()) { @@ -282,9 +298,7 @@ bool HighsTransformedLp::transform(std::vector& vals, double maxError = 0.0; auto IsZero = [&](HighsInt col, double val) { - double absval = std::abs(val); - if (absval <= mip.options_mip_->small_matrix_value) return true; - + if (std::abs(val) <= mip.options_mip_->small_matrix_value) return true; return false; }; @@ -301,38 +315,39 @@ bool HighsTransformedLp::transform(std::vector& vals, for (HighsInt j = 0; j != numNz; ++j) vals[j] = vectorsum.getValue(inds[j]); vectorsum.clear(); - } else if (removeZeros) { - for (HighsInt i = numNz - 1; i >= 0; --i) { - if (vals[i] == 0) { - --numNz; - vals[i] = vals[numNz]; - inds[i] = inds[numNz]; - std::swap(vals[i], vals[numNz]); - std::swap(inds[i], inds[numNz]); - } - } - + } else { vals.resize(numNz); inds.resize(numNz); } - if (integersPositive) { - // complement integers to make coefficients positive - for (HighsInt j = 0; j != numNz; ++j) { - HighsInt col = inds[j]; - if (!lprelaxation.isColIntegral(inds[j])) continue; + for (HighsInt j = 0; j != numNz; ++j) { + HighsInt col = inds[j]; + + // get bounds + double lb = getLb(col); + double ub = getUb(col); + + // variable should not be free + assert(lb != -kHighsInf || ub != kHighsInf); + + // set bound type for previously unprocessed integer-constrained variables + if (!lprelaxation.isColIntegral(col)) continue; + + // do not overwrite bound type for integral slacks from vlb / vub + // constraints + if (boundTypes[col] == BoundType::kVariableLb || + boundTypes[col] == BoundType::kVariableUb) + continue; - if (vals[j] > 0) + // complement integers to make coefficients positive if both bounds are + // finite; otherwise, complement integers with closest bound. + // take into account 'integersPositive' provided by caller. 
+ if (integersPositive) { + if ((lb != -kHighsInf && vals[j] > 0) || ub == kHighsInf) boundTypes[col] = BoundType::kSimpleLb; else boundTypes[col] = BoundType::kSimpleUb; - } - } else { - // complement integers with closest bound - for (HighsInt j = 0; j != numNz; ++j) { - HighsInt col = inds[j]; - if (!lprelaxation.isColIntegral(inds[j])) continue; - + } else { if (lbDist[col] < ubDist[col]) boundTypes[col] = BoundType::kSimpleLb; else @@ -346,28 +361,21 @@ bool HighsTransformedLp::transform(std::vector& vals, for (HighsInt j = 0; j != numNz; ++j) { HighsInt col = inds[j]; - double lb; - double ub; - - if (col < slackOffset) { - lb = mip.mipdata_->domain.col_lower_[col]; - ub = mip.mipdata_->domain.col_upper_[col]; - } else { - HighsInt row = col - slackOffset; - lb = lprelaxation.slackLower(row); - ub = lprelaxation.slackUpper(row); - } + double lb = getLb(col); + double ub = getUb(col); upper[j] = ub - lb; switch (boundTypes[col]) { case BoundType::kSimpleLb: { + // shift (lower bound) assert(lb != -kHighsInf); tmpRhs -= lb * vals[j]; solval[j] = lbDist[col]; break; } case BoundType::kSimpleUb: { + // complement (upper bound) assert(ub != kHighsInf); tmpRhs -= ub * vals[j]; vals[j] = -vals[j]; @@ -380,9 +388,13 @@ bool HighsTransformedLp::transform(std::vector& vals, } case BoundType::kVariableUb: { solval[j] = ubDist[col]; - continue; + break; } } + + // check if all integer-constrained variables have positive coefficients + if (lprelaxation.isColIntegral(col)) + integersPositive = integersPositive && vals[j] > 0; } rhs = double(tmpRhs); diff --git a/src/model/HighsHessianUtils.cpp b/src/model/HighsHessianUtils.cpp index 6e809cc759..56a68f1543 100644 --- a/src/model/HighsHessianUtils.cpp +++ b/src/model/HighsHessianUtils.cpp @@ -28,14 +28,9 @@ HighsStatus assessHessian(HighsHessian& hessian, const HighsOptions& options) { HighsStatus return_status = HighsStatus::kOk; HighsStatus call_status; - // Assess the Hessian dimensions and vector sizes, returning on error - vector hessian_p_end; - const bool partitioned = false; - call_status = assessMatrixDimensions( - options.log_options, hessian.dim_, partitioned, hessian.start_, - hessian_p_end, hessian.index_, hessian.value_); - return_status = interpretCallStatus(options.log_options, call_status, - return_status, "assessMatrixDimensions"); + return_status = interpretCallStatus(options.log_options, + assessHessianDimensions(options, hessian), + return_status, "assessHessianDimensions"); if (return_status == HighsStatus::kError) return return_status; // If the Hessian has no columns there is nothing left to test @@ -110,6 +105,18 @@ HighsStatus assessHessian(HighsHessian& hessian, const HighsOptions& options) { return return_status; } +HighsStatus assessHessianDimensions(const HighsOptions& options, + HighsHessian& hessian) { + if (hessian.dim_ == 0) return HighsStatus::kOk; + + // Assess the Hessian dimensions and vector sizes + vector hessian_p_end; + const bool partitioned = false; + return assessMatrixDimensions(options.log_options, hessian.dim_, partitioned, + hessian.start_, hessian_p_end, hessian.index_, + hessian.value_); +} + void completeHessianDiagonal(const HighsOptions& options, HighsHessian& hessian) { // Count the number of missing diagonal entries diff --git a/src/presolve/HPresolve.cpp b/src/presolve/HPresolve.cpp index a6f0776410..8b5bdcc14a 100644 --- a/src/presolve/HPresolve.cpp +++ b/src/presolve/HPresolve.cpp @@ -243,10 +243,11 @@ bool HPresolve::isImpliedEquationAtUpper(HighsInt row) const { } bool 
HPresolve::isImpliedIntegral(HighsInt col) { - bool runDualDetection = true; - + // check if the integer constraint on a variable is implied by the model assert(model->integrality_[col] == HighsVarType::kInteger); + bool runDualDetection = true; + for (const HighsSliceNonzero& nz : getColumnVector(col)) { // if not all other columns are integer, skip row and also do not try the // dual detection in the second loop as it must hold for all rows @@ -283,26 +284,33 @@ bool HPresolve::isImpliedIntegral(HighsInt col) { for (const HighsSliceNonzero& nz : getColumnVector(col)) { double scale = 1.0 / nz.value(); + // if row coefficients are not integral, variable is not (implied) integral if (!rowCoefficientsIntegral(nz.index(), scale)) return false; if (model->row_upper_[nz.index()] != kHighsInf) { + // right-hand side: scale, round down and unscale again double rUpper = std::abs(nz.value()) * std::floor(model->row_upper_[nz.index()] * std::abs(scale) + primal_feastol); + // check if modification is large enough if (std::abs(model->row_upper_[nz.index()] - rUpper) > options->small_matrix_value) { + // update right-hand side and mark row as changed model->row_upper_[nz.index()] = rUpper; markChangedRow(nz.index()); } - } else { - assert(model->row_lower_[nz.index()] != -kHighsInf); + } + if (model->row_lower_[nz.index()] != -kHighsInf) { + // left-hand side: scale, round up and unscale again double rLower = std::abs(nz.value()) * - std::ceil(model->row_upper_[nz.index()] * std::abs(scale) - + std::ceil(model->row_lower_[nz.index()] * std::abs(scale) - primal_feastol); + // check if modification is large enough if (std::abs(model->row_lower_[nz.index()] - rLower) > options->small_matrix_value) { - model->row_upper_[nz.index()] = rLower; + // update left-hand side and mark row as changed + model->row_lower_[nz.index()] = rLower; markChangedRow(nz.index()); } } @@ -312,10 +320,11 @@ bool HPresolve::isImpliedIntegral(HighsInt col) { } bool HPresolve::isImpliedInteger(HighsInt col) { - bool runDualDetection = true; - + // check if a continuous variable is implied integer assert(model->integrality_[col] == HighsVarType::kContinuous); + bool runDualDetection = true; + for (const HighsSliceNonzero& nz : getColumnVector(col)) { // if not all other columns are integer, skip row and also do not try the // dual detection in the second loop as it must hold for all rows @@ -354,18 +363,18 @@ bool HPresolve::isImpliedInteger(HighsInt col) { if ((model->col_lower_[col] != -kHighsInf && fractionality(model->col_lower_[col]) > options->small_matrix_value) || - (model->col_upper_[col] != -kHighsInf && + (model->col_upper_[col] != kHighsInf && fractionality(model->col_upper_[col]) > options->small_matrix_value)) return false; for (const HighsSliceNonzero& nz : getColumnVector(col)) { double scale = 1.0 / nz.value(); if (model->row_upper_[nz.index()] != kHighsInf && - fractionality(model->row_upper_[nz.index()]) > primal_feastol) + fractionality(model->row_upper_[nz.index()] * scale) > primal_feastol) return false; if (model->row_lower_[nz.index()] != -kHighsInf && - fractionality(model->row_lower_[nz.index()]) > primal_feastol) + fractionality(model->row_lower_[nz.index()] * scale) > primal_feastol) return false; if (!rowCoefficientsIntegral(nz.index(), scale)) return false; @@ -2050,8 +2059,7 @@ void HPresolve::storeRow(HighsInt row) { rowpositions.clear(); auto rowVec = getSortedRowVector(row); - auto rowVecEnd = rowVec.end(); - for (auto iter = rowVec.begin(); iter != rowVecEnd; ++iter) + for (auto iter = 
rowVec.begin(); iter != rowVec.end(); ++iter) rowpositions.push_back(iter.position()); } @@ -2356,7 +2364,7 @@ void HPresolve::scaleStoredRow(HighsInt row, double scale, bool integral) { if (integral) { if (model->row_upper_[row] != kHighsInf) model->row_upper_[row] = std::round(model->row_upper_[row]); - if (model->row_lower_[row] != kHighsInf) + if (model->row_lower_[row] != -kHighsInf) model->row_lower_[row] = std::round(model->row_lower_[row]); } @@ -2853,121 +2861,9 @@ HPresolve::Result HPresolve::singletonCol(HighsPostsolveStack& postsolve_stack, return Result::kOk; } - double colDualUpper = - -impliedDualRowBounds.getSumLower(col, -model->col_cost_[col]); - double colDualLower = - -impliedDualRowBounds.getSumUpper(col, -model->col_cost_[col]); - - const bool logging_on = analysis_.logging_on_; - // check for dominated column - if (colDualLower > options->dual_feasibility_tolerance) { - if (model->col_lower_[col] == -kHighsInf) return Result::kDualInfeasible; - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); - if (fixColToLowerOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); - return checkLimits(postsolve_stack); - } - - if (colDualUpper < -options->dual_feasibility_tolerance) { - if (model->col_upper_[col] == kHighsInf) return Result::kDualInfeasible; - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); - if (fixColToUpperOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); - return checkLimits(postsolve_stack); - } - - // check for weakly dominated column - if (colDualUpper <= options->dual_feasibility_tolerance) { - if (model->col_upper_[col] != kHighsInf) { - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); - if (fixColToUpperOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); - } else if (impliedDualRowBounds.getSumLowerOrig(col) == 0.0 && - analysis_.allow_rule_[kPresolveRuleForcingCol]) { - // todo: forcing column, since this implies colDual >= 0 and we - // already checked that colDual <= 0 and since the cost are 0.0 - // all the rows are at a dual multiplier of zero and we can - // determine one nonbasic row in postsolve, and make the other - // rows and the column basic. The columns primal value is - // computed from the nonbasic row which is chosen such that the - // values of all rows are primal feasible printf("removing - // forcing column of size %" HIGHSINT_FORMAT "\n", - // colsize[col]); - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleForcingCol); - postsolve_stack.forcingColumn( - col, getColumnVector(col), model->col_cost_[col], - model->col_lower_[col], true, - model->integrality_[col] == HighsVarType::kInteger); - markColDeleted(col); - HighsInt coliter = colhead[col]; - while (coliter != -1) { - HighsInt row = Arow[coliter]; - double rhs = Avalue[coliter] > 0.0 ? 
model->row_lower_[row] - : model->row_upper_[row]; - coliter = Anext[coliter]; - - postsolve_stack.forcingColumnRemovedRow(col, row, rhs, - getRowVector(row)); - removeRow(row); - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleForcingCol); - } - return checkLimits(postsolve_stack); - } - if (colDualLower >= -options->dual_feasibility_tolerance) { - if (model->col_lower_[col] != -kHighsInf) { - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); - if (fixColToLowerOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); - } else if (impliedDualRowBounds.getSumUpperOrig(col) == 0.0 && - analysis_.allow_rule_[kPresolveRuleForcingCol]) { - // forcing column, since this implies colDual <= 0 and we already checked - // that colDual >= 0 - // printf("removing forcing column of size %" HIGHSINT_FORMAT "\n", - // colsize[col]); - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleForcingCol); - postsolve_stack.forcingColumn( - col, getColumnVector(col), model->col_cost_[col], - model->col_upper_[col], false, - model->integrality_[col] == HighsVarType::kInteger); - markColDeleted(col); - HighsInt coliter = colhead[col]; - while (coliter != -1) { - HighsInt row = Arow[coliter]; - double rhs = Avalue[coliter] > 0.0 ? model->row_upper_[row] - : model->row_lower_[row]; - coliter = Anext[coliter]; - - postsolve_stack.forcingColumnRemovedRow(col, row, rhs, - getRowVector(row)); - removeRow(row); - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleForcingCol); - } - return checkLimits(postsolve_stack); - } + // detect strong / weak domination + HPRESOLVE_CHECKED_CALL(detectDominatedCol(postsolve_stack, col, false)); + if (colDeleted[col]) return Result::kOk; if (mipsolver != nullptr) convertImpliedInteger(col, row); @@ -2984,21 +2880,16 @@ HPresolve::Result HPresolve::singletonCol(HighsPostsolveStack& postsolve_stack, !isImpliedIntegral(col)) return Result::kOk; + const bool logging_on = analysis_.logging_on_; + if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleFreeColSubstitution); + // todo, store which side of an implied free dual variable needs to be used // for substitution storeRow(row); - HighsPostsolveStack::RowType rowType; - double rhs; - dualImpliedFreeGetRhsAndRowType(row, rhs, rowType); - - postsolve_stack.freeColSubstitution(row, col, rhs, model->col_cost_[col], - rowType, getStoredRow(), - getColumnVector(col)); - // todo, check integrality of coefficients and allow this - substitute(row, col, rhs); + substituteFreeCol(postsolve_stack, row, col); analysis_.logging_on_ = logging_on; if (logging_on) @@ -3010,6 +2901,26 @@ HPresolve::Result HPresolve::singletonCol(HighsPostsolveStack& postsolve_stack, return Result::kOk; } +void HPresolve::substituteFreeCol(HighsPostsolveStack& postsolve_stack, + HighsInt row, HighsInt col, + bool relaxRowDualBounds) { + assert(!rowDeleted[row]); + assert(!colDeleted[col]); + assert(isDualImpliedFree(row)); + assert(isImpliedFree(col)); + + HighsPostsolveStack::RowType rowType; + double rhs; + dualImpliedFreeGetRhsAndRowType(row, rhs, rowType, relaxRowDualBounds); + + postsolve_stack.freeColSubstitution(row, col, rhs, model->col_cost_[col], + rowType, getStoredRow(), + getColumnVector(col)); + + 
// todo, check integrality of coefficients and allow this + substitute(row, col, rhs); +} + HPresolve::Result HPresolve::rowPresolve(HighsPostsolveStack& postsolve_stack, HighsInt row) { assert(!rowDeleted[row]); @@ -3178,6 +3089,20 @@ HPresolve::Result HPresolve::rowPresolve(HighsPostsolveStack& postsolve_stack, continue; } + auto remDoubletonEq = [&](HighsInt col, HighsInt binCol, + HighsInt direction) { + double bound = direction >= 0 ? model->col_upper_[col] + : model->col_lower_[col]; + double scale = + direction * (model->col_lower_[col] - model->col_upper_[col]); + double offset = bound - model->col_lower_[binCol] * scale; + postsolve_stack.doubletonEquation( + -1, col, binCol, 1.0, -scale, offset, model->col_lower_[col], + model->col_upper_[col], 0.0, false, false, + HighsPostsolveStack::RowType::kEq, HighsEmptySlice()); + substitute(col, binCol, offset, scale); + }; + if (std::signbit(binCoef) == std::signbit(nonz.value())) { // binary coefficient is positive: // setting the binary to its upper bound @@ -3193,16 +3118,7 @@ HPresolve::Result HPresolve::rowPresolve(HighsPostsolveStack& postsolve_stack, // nonzCol = colUb - (colUb - colLb)(binCol - binLb) // nonzCol = colUb + binLb * (colUb - colLb) - (colUb - colLb) * // binCol - double scale = model->col_lower_[nonz.index()] - - model->col_upper_[nonz.index()]; - double offset = model->col_upper_[nonz.index()] - - model->col_lower_[binCol] * scale; - postsolve_stack.doubletonEquation( - -1, nonz.index(), binCol, 1.0, -scale, offset, - model->col_lower_[nonz.index()], - model->col_upper_[nonz.index()], 0.0, false, false, - HighsPostsolveStack::RowType::kEq, HighsEmptySlice()); - substitute(nonz.index(), binCol, offset, scale); + remDoubletonEq(nonz.index(), binCol, HighsInt{1}); } else { // This case yields the following implications: // binCol = lb -> nonzCol = lb @@ -3211,16 +3127,7 @@ HPresolve::Result HPresolve::rowPresolve(HighsPostsolveStack& postsolve_stack, // nonzCol = colLb + (colUb - colLb)(binCol - binLb) // nonzCol = // colLb - binLb*(colUb - colLb) + (colUb - colLb)*binCol - double scale = model->col_upper_[nonz.index()] - - model->col_lower_[nonz.index()]; - double offset = model->col_lower_[nonz.index()] - - model->col_lower_[binCol] * scale; - postsolve_stack.doubletonEquation( - -1, nonz.index(), binCol, 1.0, -scale, offset, - model->col_lower_[nonz.index()], - model->col_upper_[nonz.index()], 0.0, false, false, - HighsPostsolveStack::RowType::kEq, HighsEmptySlice()); - substitute(nonz.index(), binCol, offset, scale); + remDoubletonEq(nonz.index(), binCol, HighsInt{-1}); } } @@ -3771,15 +3678,17 @@ HPresolve::Result HPresolve::rowPresolve(HighsPostsolveStack& postsolve_stack, // in reverse, it will first restore the column primal and dual values // as the dual values are required to find the proper dual multiplier for // the row and the column that we put in the basis. 
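Not part of the patch: a toy check of the doubleton-equation substitution performed by the remDoubletonEq helper above. The variable names mirror the helper's arguments but the bounds are made up; direction = +1 corresponds to the same-sign case, where the binary at its lower bound forces the other column to its upper bound.

// Illustrative only: nonzCol = offset + scale * binCol, verified at both
// binary values.
#include <cassert>

int main() {
  double colLb = 2.0, colUb = 5.0;  // bounds of the substituted column
  double binLb = 0.0, binUb = 1.0;  // bounds of the binary column
  int direction = 1;

  double bound = direction >= 0 ? colUb : colLb;
  double scale = direction * (colLb - colUb);
  double offset = bound - binLb * scale;

  // binCol at its lower bound forces nonzCol to colUb ...
  assert(offset + scale * binLb == colUb);
  // ... and binCol at its upper bound forces nonzCol to colLb.
  assert(offset + scale * binUb == colLb);
  return 0;
}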
- Result res = checkForcingRow(row, HighsInt{1}, model->row_lower_[row], - HighsPostsolveStack::RowType::kGeq); - if (rowDeleted[row]) return res; + HPRESOLVE_CHECKED_CALL( + checkForcingRow(row, HighsInt{1}, model->row_lower_[row], + HighsPostsolveStack::RowType::kGeq)); + if (rowDeleted[row]) return Result::kOk; } else if (impliedRowLower >= model->row_upper_[row] - primal_feastol) { // forcing row in the other direction - Result res = checkForcingRow(row, HighsInt{-1}, model->row_upper_[row], - HighsPostsolveStack::RowType::kLeq); - if (rowDeleted[row]) return res; + HPRESOLVE_CHECKED_CALL( + checkForcingRow(row, HighsInt{-1}, model->row_upper_[row], + HighsPostsolveStack::RowType::kLeq)); + if (rowDeleted[row]) return Result::kOk; } } @@ -3864,98 +3773,9 @@ HPresolve::Result HPresolve::colPresolve(HighsPostsolveStack& postsolve_stack, break; } - double colDualUpper = - -impliedDualRowBounds.getSumLower(col, -model->col_cost_[col]); - double colDualLower = - -impliedDualRowBounds.getSumUpper(col, -model->col_cost_[col]); - - // check for dominated column - if (colDualLower > options->dual_feasibility_tolerance) { - if (model->col_lower_[col] == -kHighsInf) - return Result::kDualInfeasible; - else { - if (fixColToLowerOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); - } - return checkLimits(postsolve_stack); - } - - if (colDualUpper < -options->dual_feasibility_tolerance) { - if (model->col_upper_[col] == kHighsInf) - return Result::kDualInfeasible; - else { - if (fixColToUpperOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); - } - return checkLimits(postsolve_stack); - } - - // check for weakly dominated column - if (colDualUpper <= options->dual_feasibility_tolerance) { - if (model->col_upper_[col] != kHighsInf) { - if (fixColToUpperOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); - return checkLimits(postsolve_stack); - } else if (impliedDualRowBounds.getSumLowerOrig(col) == 0.0) { - if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleForcingCol); - postsolve_stack.forcingColumn( - col, getColumnVector(col), model->col_cost_[col], - model->col_lower_[col], true, - model->integrality_[col] == HighsVarType::kInteger); - markColDeleted(col); - HighsInt coliter = colhead[col]; - while (coliter != -1) { - HighsInt row = Arow[coliter]; - double rhs = Avalue[coliter] > 0.0 ? 
model->row_lower_[row] - : model->row_upper_[row]; - coliter = Anext[coliter]; - postsolve_stack.forcingColumnRemovedRow(col, row, rhs, - getRowVector(row)); - removeRow(row); - } - analysis_.logging_on_ = logging_on; - if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleForcingCol); - } - } else if (colDualLower >= -options->dual_feasibility_tolerance) { - // symmetric case for fixing to the lower bound - if (model->col_lower_[col] != -kHighsInf) { - if (fixColToLowerOrUnbounded(postsolve_stack, col)) { - // Handle unboundedness - presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; - return Result::kDualInfeasible; - } - HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); - return checkLimits(postsolve_stack); - } else if (impliedDualRowBounds.getSumUpperOrig(col) == 0.0) { - postsolve_stack.forcingColumn( - col, getColumnVector(col), model->col_cost_[col], - model->col_upper_[col], false, - model->integrality_[col] == HighsVarType::kInteger); - markColDeleted(col); - HighsInt coliter = colhead[col]; - while (coliter != -1) { - HighsInt row = Arow[coliter]; - double rhs = Avalue[coliter] > 0.0 ? model->row_upper_[row] - : model->row_lower_[row]; - coliter = Anext[coliter]; - postsolve_stack.forcingColumnRemovedRow(col, row, rhs, - getRowVector(row)); - removeRow(row); - } - } - } + // detect strong / weak domination + HPRESOLVE_CHECKED_CALL(detectDominatedCol(postsolve_stack, col)); + if (colDeleted[col]) return Result::kOk; // column is not (weakly) dominated @@ -4039,6 +3859,129 @@ HPresolve::Result HPresolve::colPresolve(HighsPostsolveStack& postsolve_stack, return Result::kOk; } +HPresolve::Result HPresolve::detectDominatedCol( + HighsPostsolveStack& postsolve_stack, HighsInt col, + bool handleSingletonRows) { + assert(!colDeleted[col]); + + // get bounds on column dual + double colDualUpper = + -impliedDualRowBounds.getSumLower(col, -model->col_cost_[col]); + double colDualLower = + -impliedDualRowBounds.getSumUpper(col, -model->col_cost_[col]); + + const bool logging_on = analysis_.logging_on_; + + auto dominatedCol = [&](HighsInt col, double dualBound, double bound, + HighsInt direction) { + // column is (strongly) dominated if the bounds on the column dual satisfy: + // 1. lower bound > dual feasibility tolerance (direction = 1) or + // 2. upper bound < -dual feasibility tolerance (direction = -1). + if (direction * dualBound <= options->dual_feasibility_tolerance) + return Result::kOk; + // cannot fix to +-infinity -> infeasible + if (direction * bound == -kHighsInf) return Result::kDualInfeasible; + if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); + // fix variable + bool unbounded = false; + if (direction > 0) + unbounded = fixColToLowerOrUnbounded(postsolve_stack, col); + else + unbounded = fixColToUpperOrUnbounded(postsolve_stack, col); + if (unbounded) { + // Handle unboundedness + presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; + return Result::kDualInfeasible; + } + analysis_.logging_on_ = logging_on; + if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); + // handle row singletons (if requested) + if (handleSingletonRows) + HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); + return checkLimits(postsolve_stack); + }; + + auto weaklyDominatedCol = [&](HighsInt col, double dualBound, double bound, + double otherBound, HighsInt direction) { + // column is weakly dominated if the bounds on the column dual satisfy: + // 1. 
lower bound >= -dual feasibility tolerance (direction = 1) or + // 2. upper bound <= dual feasibility tolerance (direction = -1). + if (direction * dualBound < -options->dual_feasibility_tolerance) + return Result::kOk; + if (direction * bound != -kHighsInf) { + if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleDominatedCol); + // fix variable + bool unbounded = false; + if (direction > 0) + unbounded = fixColToLowerOrUnbounded(postsolve_stack, col); + else + unbounded = fixColToUpperOrUnbounded(postsolve_stack, col); + if (unbounded) { + // Handle unboundedness + presolve_status_ = HighsPresolveStatus::kUnboundedOrInfeasible; + return Result::kDualInfeasible; + } + analysis_.logging_on_ = logging_on; + if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleDominatedCol); + // handle row singletons (if requested) + if (handleSingletonRows) + HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); + return checkLimits(postsolve_stack); + } else if (analysis_.allow_rule_[kPresolveRuleForcingCol]) { + // get bound on dual (column) activity + HighsCDouble sum = 0; + if (direction > 0) + sum = impliedDualRowBounds.getSumUpperOrig(col); + else + sum = impliedDualRowBounds.getSumLowerOrig(col); + if (sum == 0.0) { + // remove column and rows + if (logging_on) analysis_.startPresolveRuleLog(kPresolveRuleForcingCol); + postsolve_stack.forcingColumn( + col, getColumnVector(col), model->col_cost_[col], otherBound, + direction < 0, model->integrality_[col] == HighsVarType::kInteger); + markColDeleted(col); + HighsInt coliter = colhead[col]; + while (coliter != -1) { + HighsInt row = Arow[coliter]; + double rhs = direction * Avalue[coliter] > 0.0 + ? model->row_upper_[row] + : model->row_lower_[row]; + coliter = Anext[coliter]; + + postsolve_stack.forcingColumnRemovedRow(col, row, rhs, + getRowVector(row)); + removeRow(row); + } + analysis_.logging_on_ = logging_on; + if (logging_on) analysis_.stopPresolveRuleLog(kPresolveRuleForcingCol); + return checkLimits(postsolve_stack); + } + } + return Result::kOk; + }; + + // check for dominated column + HPRESOLVE_CHECKED_CALL( + dominatedCol(col, colDualLower, model->col_lower_[col], HighsInt{1})); + if (colDeleted[col]) return Result::kOk; + + HPRESOLVE_CHECKED_CALL( + dominatedCol(col, colDualUpper, model->col_upper_[col], HighsInt{-1})); + if (colDeleted[col]) return Result::kOk; + + // check for weakly dominated column + HPRESOLVE_CHECKED_CALL( + weaklyDominatedCol(col, colDualLower, model->col_lower_[col], + model->col_upper_[col], HighsInt{1})); + if (colDeleted[col]) return Result::kOk; + + HPRESOLVE_CHECKED_CALL( + weaklyDominatedCol(col, colDualUpper, model->col_upper_[col], + model->col_lower_[col], HighsInt{-1})); + return Result::kOk; +} + HPresolve::Result HPresolve::initialRowAndColPresolve( HighsPostsolveStack& postsolve_stack) { // do a full scan over the rows as the singleton arrays and the changed row @@ -4846,18 +4789,9 @@ HPresolve::Result HPresolve::aggregator(HighsPostsolveStack& postsolve_stack) { // in the case where the row has length two or the column has length two // we always do the substitution since the fillin can never be problematic if (rowsize[row] == 2 || colsize[col] == 2) { - double rhs; - HighsPostsolveStack::RowType rowType; - dualImpliedFreeGetRhsAndRowType(row, rhs, rowType, true); - storeRow(row); - - postsolve_stack.freeColSubstitution(row, col, rhs, model->col_cost_[col], - rowType, getStoredRow(), - getColumnVector(col)); + substituteFreeCol(postsolve_stack, row, col, true); 
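Not part of the patch: the strong-domination test used by the detectDominatedCol helper above, restated as a small free function. The enum, function name and tolerance are illustrative rather than the HPresolve API; dualBound plays the role of colDualLower (direction = +1) or colDualUpper (direction = -1), and bound is the primal bound the column would be fixed at.

// Illustrative only: if the column dual is provably bounded away from zero,
// the column can be fixed at the corresponding primal bound; if that bound
// is infinite, the problem is dual infeasible.
#include <limits>

enum class Outcome { kKeep, kFixAtBound, kDualInfeasible };

Outcome dominatedCol(double dualBound, double bound, int direction,
                     double dual_feastol) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (direction * dualBound <= dual_feastol) return Outcome::kKeep;
  if (direction * bound == -kInf) return Outcome::kDualInfeasible;
  return Outcome::kFixAtBound;
}

int main() {
  // colDualLower = 0.5 > tolerance with a finite lower bound: fix there.
  return dominatedCol(0.5, 0.0, 1, 1e-7) == Outcome::kFixAtBound ? 0 : 1;
}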
substitutionOpportunities[i].first = -1; - - substitute(row, col, rhs); HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); HPRESOLVE_CHECKED_CALL(checkLimits(postsolve_stack)); continue; @@ -4894,15 +4828,8 @@ HPresolve::Result HPresolve::aggregator(HighsPostsolveStack& postsolve_stack) { } nfail = 0; - double rhs; - HighsPostsolveStack::RowType rowType; - dualImpliedFreeGetRhsAndRowType(row, rhs, rowType, true); - - postsolve_stack.freeColSubstitution(row, col, rhs, model->col_cost_[col], - rowType, getStoredRow(), - getColumnVector(col)); + substituteFreeCol(postsolve_stack, row, col, true); substitutionOpportunities[i].first = -1; - substitute(row, col, rhs); HPRESOLVE_CHECKED_CALL(removeRowSingletons(postsolve_stack)); HPRESOLVE_CHECKED_CALL(checkLimits(postsolve_stack)); } @@ -5568,17 +5495,21 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( // compensating column is integral bool checkColImplBounds = true; bool checkDuplicateColImplBounds = true; + auto isLowerStrictlyImplied = [&](HighsInt col) { + return (model->col_lower_[col] == -kHighsInf || + implColLower[col] > model->col_lower_[col] + primal_feastol); + }; + auto isUpperStrictlyImplied = [&](HighsInt col) { + return (model->col_upper_[col] == kHighsInf || + implColUpper[col] < model->col_upper_[col] - primal_feastol); + }; auto colUpperInf = [&]() { if (!checkColImplBounds) return false; if (mipsolver == nullptr) { // for LP we check strict redundancy of the bounds as otherwise dual // postsolve might fail when the bound is used in the optimal solution - return colScale > 0 ? model->col_upper_[col] == kHighsInf || - implColUpper[col] < - model->col_upper_[col] - primal_feastol - : model->col_lower_[col] == -kHighsInf || - implColLower[col] > - model->col_lower_[col] + primal_feastol; + return colScale > 0 ? isUpperStrictlyImplied(col) + : isLowerStrictlyImplied(col); } else { // for MIP we do not need dual postsolve so the reduction is valid if // the bound is weakly redundant @@ -5589,12 +5520,8 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( auto colLowerInf = [&]() { if (!checkColImplBounds) return false; if (mipsolver == nullptr) { - return colScale > 0 ? model->col_lower_[col] == -kHighsInf || - implColLower[col] > - model->col_lower_[col] + primal_feastol - : model->col_upper_[col] == kHighsInf || - implColUpper[col] < - model->col_upper_[col] - primal_feastol; + return colScale > 0 ? isLowerStrictlyImplied(col) + : isUpperStrictlyImplied(col); } else { return colScale > 0 ? 
isLowerImplied(col) : isUpperImplied(col); } @@ -5603,9 +5530,7 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( auto duplicateColUpperInf = [&]() { if (!checkDuplicateColImplBounds) return false; if (mipsolver == nullptr) { - return model->col_upper_[duplicateCol] == kHighsInf || - implColUpper[duplicateCol] < - model->col_upper_[duplicateCol] - primal_feastol; + return isUpperStrictlyImplied(duplicateCol); } else { return isUpperImplied(duplicateCol); } @@ -5614,9 +5539,7 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( auto duplicateColLowerInf = [&]() { if (!checkDuplicateColImplBounds) return false; if (mipsolver == nullptr) { - return model->col_lower_[duplicateCol] == -kHighsInf || - implColLower[duplicateCol] > - model->col_lower_[duplicateCol] + primal_feastol; + return isLowerStrictlyImplied(duplicateCol); } else { return isLowerImplied(duplicateCol); } @@ -5685,7 +5608,7 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( // incorporate the check for the variable types that allow domination. if (objDiff < -options->dual_feasibility_tolerance) { // scaled col is better than duplicate col - if (colUpperInf() && model->col_lower_[duplicateCol] != kHighsInf) + if (colUpperInf() && model->col_lower_[duplicateCol] != -kHighsInf) reductionCase = kDominanceDuplicateColToLower; else if (duplicateColLowerInf() && (colScale < 0 || model->col_upper_[col] != kHighsInf) && @@ -5988,8 +5911,12 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( auto it = buckets.find(rowHashes[i]); decltype(it) last = it; - const HighsInt* numSingletonPtr = numRowSingletons.find(i); - HighsInt numSingleton = numSingletonPtr ? *numSingletonPtr : 0; + auto getNumSingletons = [&](HighsInt row) { + const HighsInt* numSingletonPtr = numRowSingletons.find(row); + return (numSingletonPtr ? *numSingletonPtr : 0); + }; + + const HighsInt numSingleton = getNumSingletons(i); #if !ENABLE_SPARSIFY_FOR_LP if (mipsolver == nullptr && options->lp_presolve_requires_basis_postsolve && @@ -6002,9 +5929,8 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( HighsInt parallelRowCand = it->second; last = it++; - numSingletonPtr = numRowSingletons.find(parallelRowCand); - const HighsInt numSingletonCandidate = - numSingletonPtr ? 
*numSingletonPtr : 0; + const HighsInt numSingletonCandidate = getNumSingletons(parallelRowCand); + #if !ENABLE_SPARSIFY_FOR_LP if (mipsolver == nullptr && options->lp_presolve_requires_basis_postsolve && @@ -6145,31 +6071,8 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( // HIGHSINT_FORMAT ")\n", numSingleton, numSingletonCandidate, // model->row_lower_[parallelRowCand] == // model->row_upper_[parallelRowCand]); - postsolve_stack.equalityRowAddition(parallelRowCand, i, -rowScale, - getStoredRow()); - for (const HighsSliceNonzero& rowNz : getStoredRow()) { - HighsInt pos = findNonzero(parallelRowCand, rowNz.index()); - if (pos != -1) - unlink(pos); // all common nonzeros are cancelled, as the rows are - // parallel - else // might introduce a singleton - addToMatrix(parallelRowCand, rowNz.index(), - -rowScale * rowNz.value()); - } - - if (model->row_upper_[parallelRowCand] != kHighsInf) - model->row_upper_[parallelRowCand] = - double(model->row_upper_[parallelRowCand] - - HighsCDouble(rowScale) * model->row_upper_[i]); - if (model->row_lower_[parallelRowCand] != -kHighsInf) - model->row_lower_[parallelRowCand] = - double(model->row_lower_[parallelRowCand] - - HighsCDouble(rowScale) * model->row_upper_[i]); - - // parallelRowCand is now a singleton row, doubleton equation, or a row - // that contains only singletons and we let the normal row presolve - // handle the cases - HPRESOLVE_CHECKED_CALL(rowPresolve(postsolve_stack, parallelRowCand)); + HPRESOLVE_CHECKED_CALL(equalityRowAddition( + postsolve_stack, i, parallelRowCand, -rowScale, getStoredRow())); delRow = parallelRowCand; } else if (model->row_lower_[parallelRowCand] == model->row_upper_[parallelRowCand]) { @@ -6178,28 +6081,10 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( // row and %" HIGHSINT_FORMAT " " "singletons in other inequality // row\n", numSingletonCandidate, numSingleton); // the row parallelRowCand is an equation; add it to the other row - double scale = -rowMax[i].first / rowMax[parallelRowCand].first; - postsolve_stack.equalityRowAddition(i, parallelRowCand, scale, - getRowVector(parallelRowCand)); - for (const HighsSliceNonzero& rowNz : getRowVector(parallelRowCand)) { - HighsInt pos = findNonzero(i, rowNz.index()); - if (pos != -1) - unlink(pos); // all common nonzeros are cancelled, as the rows are - // parallel - else // might introduce a singleton - addToMatrix(i, rowNz.index(), scale * rowNz.value()); - } - - if (model->row_upper_[i] != kHighsInf) - model->row_upper_[i] = - double(model->row_upper_[i] + - HighsCDouble(scale) * model->row_upper_[parallelRowCand]); - if (model->row_lower_[i] != -kHighsInf) - model->row_lower_[i] = - double(model->row_lower_[i] + - HighsCDouble(scale) * model->row_upper_[parallelRowCand]); - - HPRESOLVE_CHECKED_CALL(rowPresolve(postsolve_stack, i)); + HPRESOLVE_CHECKED_CALL(equalityRowAddition( + postsolve_stack, parallelRowCand, i, + -rowMax[i].first / rowMax[parallelRowCand].first, + getRowVector(parallelRowCand))); delRow = i; } else { assert(numSingleton == 1); @@ -6243,6 +6128,36 @@ HPresolve::Result HPresolve::detectParallelRowsAndCols( return Result::kOk; } +template +HPresolve::Result HPresolve::equalityRowAddition( + HighsPostsolveStack& postsolve_stack, HighsInt stayrow, HighsInt removerow, + double scale, const HighsMatrixSlice& vector) { + postsolve_stack.equalityRowAddition(removerow, stayrow, scale, vector); + for (const auto& rowNz : vector) { + HighsInt pos = findNonzero(removerow, rowNz.index()); + if (pos != -1) + unlink(pos); // all 
common nonzeros are cancelled, as the rows are + // parallel + else // might introduce a singleton + addToMatrix(removerow, rowNz.index(), scale * rowNz.value()); + } + + if (model->row_upper_[removerow] != kHighsInf) + model->row_upper_[removerow] = + double(model->row_upper_[removerow] + + HighsCDouble(scale) * model->row_upper_[stayrow]); + if (model->row_lower_[removerow] != -kHighsInf) + model->row_lower_[removerow] = + double(model->row_lower_[removerow] + + HighsCDouble(scale) * model->row_upper_[stayrow]); + + // row is now a singleton row, doubleton equation, or a row + // that contains only singletons and we let the normal row presolve + // handle the cases + HPRESOLVE_CHECKED_CALL(rowPresolve(postsolve_stack, removerow)); + return Result::kOk; +} + void HPresolve::setRelaxedImpliedBounds() { double hugeBound = primal_feastol / kHighsTiny; for (HighsInt i = 0; i != model->num_col_; ++i) { diff --git a/src/presolve/HPresolve.h b/src/presolve/HPresolve.h index 75625e22a2..b61ae687a3 100644 --- a/src/presolve/HPresolve.h +++ b/src/presolve/HPresolve.h @@ -315,10 +315,16 @@ class HPresolve { Result singletonCol(HighsPostsolveStack& postsolve_stack, HighsInt col); + void substituteFreeCol(HighsPostsolveStack& postsolve_stack, HighsInt row, + HighsInt col, bool relaxRowDualBounds = false); + Result rowPresolve(HighsPostsolveStack& postsolve_stack, HighsInt row); Result colPresolve(HighsPostsolveStack& postsolve_stack, HighsInt col); + Result detectDominatedCol(HighsPostsolveStack& postsolve_stack, HighsInt col, + bool handleSingletonRows = true); + Result initialRowAndColPresolve(HighsPostsolveStack& postsolve_stack); HighsModelStatus run(HighsPostsolveStack& postsolve_stack); @@ -356,6 +362,11 @@ class HPresolve { Result detectParallelRowsAndCols(HighsPostsolveStack& postsolve_stack); + template + Result equalityRowAddition(HighsPostsolveStack& postsolve_stack, + HighsInt stayrow, HighsInt removerow, double scale, + const HighsMatrixSlice& vector); + Result sparsify(HighsPostsolveStack& postsolve_stack); void setRelaxedImpliedBounds(); diff --git a/src/simplex/HApp.h b/src/simplex/HApp.h index 5e48ae9cd3..b8de2a3f59 100644 --- a/src/simplex/HApp.h +++ b/src/simplex/HApp.h @@ -112,6 +112,9 @@ inline HighsStatus solveLpSimplex(HighsLpSolverObject& solver_object) { // return resetModelStatusAndHighsInfo(solver_object); + // Initialise the simplex stats + ekk_instance.initialiseSimplexStats(); + // Assumes that the LP has a positive number of rows, since // unconstrained LPs should be solved in solveLp bool positive_num_row = solver_object.lp_.num_row_ > 0; diff --git a/src/simplex/HEkk.cpp b/src/simplex/HEkk.cpp index dc30e2cc7e..caa4e641e0 100644 --- a/src/simplex/HEkk.cpp +++ b/src/simplex/HEkk.cpp @@ -296,6 +296,7 @@ void HEkk::invalidate() { assert(!this->status_.is_permuted); this->status_.initialised_for_solve = false; this->invalidateBasisMatrix(); + this->simplex_stats_.initialise(); } void HEkk::invalidateBasisMatrix() { @@ -2082,6 +2083,7 @@ HighsInt HEkk::computeFactor() { // number of updates shouldn't be positive info_.update_count = 0; + simplex_stats_.num_invert++; return rank_deficiency; } @@ -3500,7 +3502,19 @@ HighsStatus HEkk::returnFromEkkSolve(const HighsStatus return_status) { // Note that in timeReporting(1), analysis_.analyse_simplex_time // reverts to its value given by options_ if (analysis_.analyse_simplex_time) analysis_.reportSimplexTimer(); - + simplex_stats_.valid = true; + // Since HEkk::iteration_count_ includes iteration on presolved LP, + // 
simplex_stats_.iteration_count is initialised to - + // HEkk::iteration_count_ + simplex_stats_.iteration_count += iteration_count_; + // simplex_stats_.num_invert is incremented internally + simplex_stats_.last_invert_num_el = simplex_nla_.factor_.invert_num_el; + simplex_stats_.last_factored_basis_num_el = + simplex_nla_.factor_.basis_matrix_num_el; + simplex_stats_.col_aq_density = analysis_.col_aq_density; + simplex_stats_.row_ep_density = analysis_.row_ep_density; + simplex_stats_.row_ap_density = analysis_.row_ap_density; + simplex_stats_.row_DSE_density = analysis_.row_DSE_density; return return_status; } @@ -4406,3 +4420,30 @@ void HEkk::unitBtranResidual(const HighsInt row_out, const HVector& row_ep, residual_norm = max(fabs(residual.array[iRow]), residual_norm); } } + +void HighsSimplexStats::report(FILE* file, std::string message) const { + fprintf(file, "\nSimplex stats: %s\n", message.c_str()); + fprintf(file, " valid = %d\n", this->valid); + fprintf(file, " iteration_count = %d\n", this->iteration_count); + fprintf(file, " num_invert = %d\n", this->num_invert); + fprintf(file, " last_invert_num_el = %d\n", + this->last_invert_num_el); + fprintf(file, " last_factored_basis_num_el = %d\n", + this->last_factored_basis_num_el); + fprintf(file, " col_aq_density = %g\n", this->col_aq_density); + fprintf(file, " row_ep_density = %g\n", this->row_ep_density); + fprintf(file, " row_ap_density = %g\n", this->row_ap_density); + fprintf(file, " row_DSE_density = %g\n", this->row_DSE_density); +} + +void HighsSimplexStats::initialise(const HighsInt iteration_count_) { + valid = false; + iteration_count = -iteration_count_; + num_invert = 0; + last_invert_num_el = 0; + last_factored_basis_num_el = 0; + col_aq_density = 0; + row_ep_density = 0; + row_ap_density = 0; + row_DSE_density = 0; +} diff --git a/src/simplex/HEkk.h b/src/simplex/HEkk.h index 3b3da389da..0b253bebc0 100644 --- a/src/simplex/HEkk.h +++ b/src/simplex/HEkk.h @@ -156,6 +156,12 @@ class HEkk { const vector& rowLower, const vector& rowUpper); + const HighsSimplexStats& getSimplexStats() const { return simplex_stats_; } + void initialiseSimplexStats() { simplex_stats_.initialise(iteration_count_); } + void reportSimplexStats(FILE* file, const std::string message = "") const { + simplex_stats_.report(file, message); + } + // Make this private later void chooseSimplexStrategyThreads(const HighsOptions& options, HighsSimplexInfo& info); @@ -255,6 +261,8 @@ class HEkk { std::vector bad_basis_change_; std::vector primal_phase1_dual_; + HighsSimplexStats simplex_stats_; + private: bool isUnconstrainedLp(); void initialiseForSolve(); diff --git a/src/simplex/HEkkDualRow.cpp b/src/simplex/HEkkDualRow.cpp index 6f0e00bc6c..9dfcd41b2b 100644 --- a/src/simplex/HEkkDualRow.cpp +++ b/src/simplex/HEkkDualRow.cpp @@ -587,9 +587,7 @@ void HEkkDualRow::createFreemove(HVector* row_ep) { : ekk_instance_.info_.update_count < 20 ? 3e-8 : 1e-6; HighsInt move_out = workDelta < 0 ? 
-1 : 1; - set::iterator sit; - for (sit = freeList.begin(); sit != freeList.end(); sit++) { - HighsInt iVar = *sit; + for (const HighsInt& iVar : freeList) { assert(iVar < ekk_instance_.lp_.num_col_ + ekk_instance_.lp_.num_row_); double alpha = ekk_instance_.lp_.a_matrix_.computeDot(*row_ep, iVar); if (fabs(alpha) > Ta) { @@ -603,9 +601,7 @@ void HEkkDualRow::createFreemove(HVector* row_ep) { } void HEkkDualRow::deleteFreemove() { if (!freeList.empty()) { - set::iterator sit; - for (sit = freeList.begin(); sit != freeList.end(); sit++) { - HighsInt iVar = *sit; + for (const HighsInt& iVar : freeList) { assert(iVar < ekk_instance_.lp_.num_col_ + ekk_instance_.lp_.num_row_); ekk_instance_.basis_.nonbasicMove_[iVar] = 0; } diff --git a/src/simplex/HighsSimplexAnalysis.h b/src/simplex/HighsSimplexAnalysis.h index d7a93d2ca6..db43446f20 100644 --- a/src/simplex/HighsSimplexAnalysis.h +++ b/src/simplex/HighsSimplexAnalysis.h @@ -251,6 +251,7 @@ class HighsSimplexAnalysis { void reportFactorTimer(); void updateInvertFormData(const HFactor& factor); void reportInvertFormData(); + HighsInt numInvert() { return num_invert; } // Control methods to be moved to HEkkControl void dualSteepestEdgeWeightError(const double computed_edge_weight, diff --git a/src/util/HighsCDouble.h b/src/util/HighsCDouble.h index b02622d162..773c710495 100644 --- a/src/util/HighsCDouble.h +++ b/src/util/HighsCDouble.h @@ -289,6 +289,12 @@ class HighsCDouble { } friend HighsCDouble floor(const HighsCDouble& x) { + // Treat |x| < 1 as special case, as per (for example) + // https://github.com/shibatch/tlfloat: see #2041 + if (abs(x) < 1) { + if (x == 0 || x > 0) return HighsCDouble(0.0); + return HighsCDouble(-1.0); + } double floor_x = std::floor(double(x)); HighsCDouble res; @@ -297,6 +303,12 @@ class HighsCDouble { } friend HighsCDouble ceil(const HighsCDouble& x) { + // Treat |x| < 1 as special case, as per (for example) + // https://github.com/shibatch/tlfloat: see #2041 + if (abs(x) < 1) { + if (x == 0 || x < 0) return HighsCDouble(0.0); + return HighsCDouble(1.0); + } double ceil_x = std::ceil(double(x)); HighsCDouble res; diff --git a/src/util/HighsMatrixSlice.h b/src/util/HighsMatrixSlice.h index 3da09ce402..a96f826ec8 100644 --- a/src/util/HighsMatrixSlice.h +++ b/src/util/HighsMatrixSlice.h @@ -13,15 +13,15 @@ * underlying matrix storage formats */ +#ifndef UTIL_HIGHS_MATRIX_SLICE_H_ +#define UTIL_HIGHS_MATRIX_SLICE_H_ + #include #include #include #include "util/HighsInt.h" -#ifndef UTIL_HIGHS_MATRIX_SLICE_H_ -#define UTIL_HIGHS_MATRIX_SLICE_H_ - template class HighsMatrixSlice; diff --git a/src/util/HighsMatrixUtils.cpp b/src/util/HighsMatrixUtils.cpp index b55f4df79a..7b70695065 100644 --- a/src/util/HighsMatrixUtils.cpp +++ b/src/util/HighsMatrixUtils.cpp @@ -68,7 +68,7 @@ HighsStatus assessMatrix( // // Check whether the first start is zero if (matrix_start[0]) { - highsLogUser(log_options, HighsLogType::kWarning, + highsLogUser(log_options, HighsLogType::kError, "%s matrix start vector begins with %" HIGHSINT_FORMAT " rather than 0\n", matrix_name.c_str(), matrix_start[0]); diff --git a/src/util/HighsSparseMatrix.cpp b/src/util/HighsSparseMatrix.cpp index a493d2e7ad..d063f0468e 100644 --- a/src/util/HighsSparseMatrix.cpp +++ b/src/util/HighsSparseMatrix.cpp @@ -722,6 +722,65 @@ void HighsSparseMatrix::deleteRows( this->num_row_ = new_num_row; } +HighsStatus HighsSparseMatrix::assessStart(const HighsLogOptions& log_options) { + // Identify main dimensions + HighsInt vec_dim; + HighsInt num_vec; + if 
(this->isColwise()) { + vec_dim = this->num_row_; + num_vec = this->num_col_; + } else { + vec_dim = this->num_col_; + num_vec = this->num_row_; + } + if (this->start_[0]) { + highsLogUser(log_options, HighsLogType::kError, + "Matrix start[0] = %d, not 0\n", int(this->start_[0])); + return HighsStatus::kError; + } + HighsInt num_nz = this->numNz(); + for (HighsInt iVec = 1; iVec < num_vec; iVec++) { + if (this->start_[iVec] < this->start_[iVec - 1]) { + highsLogUser(log_options, HighsLogType::kError, + "Matrix start[%d] = %d > %d = start[%d]\n", int(iVec), + int(this->start_[iVec]), int(this->start_[iVec - 1]), + int(iVec - 1)); + return HighsStatus::kError; + } + if (this->start_[iVec] > num_nz) { + highsLogUser(log_options, HighsLogType::kError, + "Matrix start[%d] = %d > %d = number of nonzeros\n", + int(iVec), int(this->start_[iVec]), int(num_nz)); + return HighsStatus::kError; + } + } + return HighsStatus::kOk; +} + +HighsStatus HighsSparseMatrix::assessIndexBounds( + const HighsLogOptions& log_options) { + // Identify main dimensions + HighsInt vec_dim; + HighsInt num_vec; + if (this->isColwise()) { + vec_dim = this->num_row_; + // num_vec = this->num_col_; + } else { + vec_dim = this->num_col_; + // num_vec = this->num_row_; + } + HighsInt num_nz = this->numNz(); + for (HighsInt iEl = 1; iEl < num_nz; iEl++) { + if (this->index_[iEl] < 0 || this->index_[iEl] >= vec_dim) { + highsLogUser(log_options, HighsLogType::kError, + "Matrix index[%d] = %d is not in legal range of [0, %d)\n", + int(iEl), int(this->index_[iEl]), vec_dim); + return HighsStatus::kError; + } + } + return HighsStatus::kOk; +} + HighsStatus HighsSparseMatrix::assess(const HighsLogOptions& log_options, const std::string matrix_name, const double small_matrix_value, diff --git a/src/util/HighsSparseMatrix.h b/src/util/HighsSparseMatrix.h index 7333a37e64..9694183191 100644 --- a/src/util/HighsSparseMatrix.h +++ b/src/util/HighsSparseMatrix.h @@ -66,6 +66,9 @@ class HighsSparseMatrix { void deleteRows(const HighsIndexCollection& index_collection); HighsStatus assessDimensions(const HighsLogOptions& log_options, const std::string matrix_name); + HighsStatus assessStart(const HighsLogOptions& log_options); + HighsStatus assessIndexBounds(const HighsLogOptions& log_options); + HighsStatus assess(const HighsLogOptions& log_options, const std::string matrix_name, const double small_matrix_value, diff --git a/tests/test_highspy.py b/tests/test_highspy.py index aaa731d983..a979d44d5a 100644 --- a/tests/test_highspy.py +++ b/tests/test_highspy.py @@ -112,10 +112,10 @@ def get_infeasible_model(self): def test_version(self): h = self.get_basic_model() - self.assertEqual(h.version(), "1.8.1") + self.assertEqual(h.version(), "1.9.0") self.assertEqual(h.versionMajor(), 1) - self.assertEqual(h.versionMinor(), 8) - self.assertEqual(h.versionPatch(), 1) + self.assertEqual(h.versionMinor(), 9) + self.assertEqual(h.versionPatch(), 0) def test_basics(self): h = self.get_basic_model()
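Not part of the patch: a standalone illustration of why the HighsCDouble::floor/ceil change above special-cases |x| < 1 (see the reference to issue #2041 in that hunk). The hi/lo pair only mimics the compensated representation; it is not the HighsCDouble API.

// Illustrative only: a value just below 1 collapses to exactly 1.0 as a
// single double, so flooring the collapsed value is off by one.
#include <cmath>
#include <cstdio>

int main() {
  double hi = 1.0, lo = -1e-30;  // represents x = 1 - 1e-30, so floor(x) = 0
  double collapsed = hi + lo;    // rounds back to exactly 1.0
  std::printf("naive floor   = %g\n", std::floor(collapsed));  // prints 1
  // The patched code detects |x| < 1 with a full-precision comparison and
  // returns 0 (or -1 for negative x), the true floor.
  bool lessThanOne = (hi < 1.0) || (hi == 1.0 && lo < 0.0);
  std::printf("patched floor = %g\n", lessThanOne ? 0.0 : std::floor(collapsed));
  return 0;
}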